index
int64 0
10k
| blob_id
stringlengths 40
40
| step-1
stringlengths 13
984k
| step-2
stringlengths 6
1.23M
⌀ | step-3
stringlengths 15
1.34M
⌀ | step-4
stringlengths 30
1.34M
⌀ | step-5
stringlengths 64
1.2M
⌀ | step-ids
sequencelengths 1
5
|
---|---|---|---|---|---|---|---|
1,000 | 153a33b85cf8b3ef9c742f05b460e94e0c684682 | class Data(object):
<mask token>
<mask token>
class Time(Data):
def getTime(self):
print('Time:', self.data)
<mask token>
| class Data(object):
def __init__(self, data):
self.data = data
<mask token>
class Time(Data):
def getTime(self):
print('Time:', self.data)
<mask token>
| class Data(object):
def __init__(self, data):
self.data = data
def getData(self):
print('Data:', self.data)
class Time(Data):
def getTime(self):
print('Time:', self.data)
<mask token>
| class Data(object):
def __init__(self, data):
self.data = data
def getData(self):
print('Data:', self.data)
class Time(Data):
def getTime(self):
print('Time:', self.data)
if __name__ == '__main__':
data = Data(10)
time = Time(20)
time.getTime()
data.getData()
time.getData()
print(Time.mro())
| #Author: AKHILESH
#This program illustrates the advanced concepts of inheritance
#Python looks up for method in following order: Instance attributes, class attributes and the
#from the base class
#mro: Method Resolution order
class Data(object):
def __init__(self, data):
self.data = data
def getData(self):
print('Data:',self.data)
class Time(Data): #Inhertiting from Data class
def getTime(self):
print('Time:',self.data)
if __name__ == '__main__':
data = Data(10)
time = Time(20) #inherited Class -> Value passed to __init__of Data (Base class)
time.getTime()
data.getData()
time.getData()
print(Time.mro())
| [
3,
4,
5,
6,
7
] |
1,001 | e207063eb3eb1929e0e24b62e6b77a8924a80489 | <mask token>
def separa_sentencas(texto):
"""A funcao recebe um texto e devolve uma lista das sentencas dentro do texto"""
sentencas = re.split('[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
"""A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca"""
sentenca = re.split('[,:;]+', sentenca)
return sentenca
<mask token>
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i += 1
return soma
def tam_medio_sentenca(lista_sent):
TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(
lista_sent))
return TMS
<mask token>
def complexidade_sentenca(texto):
CS = len(frases(texto)) / len(separa_sentencas(texto))
return CS
<mask token>
def compara_assinatura(as_a, as_b):
"""IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas."""
i = 0
soma = 0
for i in range(6):
soma += abs(as_a[i] - as_b[i])
Sab = soma / 6
return Sab
<mask token>
| <mask token>
def separa_sentencas(texto):
"""A funcao recebe um texto e devolve uma lista das sentencas dentro do texto"""
sentencas = re.split('[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
"""A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca"""
sentenca = re.split('[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
"""A funcao recebe uma frase e devolve uma lista das palavras dentro da frase"""
return frase.split()
def n_palavras_unicas(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez"""
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas"""
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases(sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras(frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio(list_palavras):
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i += 1
tam = soma_palavras / len(palavras)
return tam
def type_token(list_palavras):
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras) / len(palavras)
return TT
<mask token>
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i += 1
return soma
def tam_medio_sentenca(lista_sent):
TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(
lista_sent))
return TMS
def frases(sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca(texto):
CS = len(frases(texto)) / len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i += 1
return soma_fr
<mask token>
def compara_assinatura(as_a, as_b):
"""IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas."""
i = 0
soma = 0
for i in range(6):
soma += abs(as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
lista.append(tam_medio(texto))
lista.append(type_token(texto))
lista.append(hapax_legomana(texto))
lista.append(tam_medio_sentenca(texto))
lista.append(complexidade_sentenca(texto))
lista.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
"""IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH."""
lista_sab = []
menor = 0
for texto in textos:
as_texto = calcula_assinatura(texto)
comparar = compara_assinatura(ass_cp, as_texto)
lista_sab.append(comparar)
menor = min(lista_sab)
return lista.index(menor) + 1
| <mask token>
def separa_sentencas(texto):
"""A funcao recebe um texto e devolve uma lista das sentencas dentro do texto"""
sentencas = re.split('[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
"""A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca"""
sentenca = re.split('[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
"""A funcao recebe uma frase e devolve uma lista das palavras dentro da frase"""
return frase.split()
def n_palavras_unicas(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez"""
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas"""
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases(sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras(frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio(list_palavras):
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i += 1
tam = soma_palavras / len(palavras)
return tam
def type_token(list_palavras):
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras) / len(palavras)
return TT
<mask token>
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i += 1
return soma
def tam_medio_sentenca(lista_sent):
TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(
lista_sent))
return TMS
def frases(sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca(texto):
CS = len(frases(texto)) / len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i += 1
return soma_fr
def tam_medio_frase(lista_frases):
TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))
return TMF
def le_textos():
"""A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento"""
i = 1
textos = []
texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')
while texto:
textos.append(texto)
i += 1
texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'
)
return textos
def compara_assinatura(as_a, as_b):
"""IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas."""
i = 0
soma = 0
for i in range(6):
soma += abs(as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
lista.append(tam_medio(texto))
lista.append(type_token(texto))
lista.append(hapax_legomana(texto))
lista.append(tam_medio_sentenca(texto))
lista.append(complexidade_sentenca(texto))
lista.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
"""IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH."""
lista_sab = []
menor = 0
for texto in textos:
as_texto = calcula_assinatura(texto)
comparar = compara_assinatura(ass_cp, as_texto)
lista_sab.append(comparar)
menor = min(lista_sab)
return lista.index(menor) + 1
| <mask token>
import re
texto = (
'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.'
)
texto1 = [
'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido'
,
' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia'
]
def separa_sentencas(texto):
"""A funcao recebe um texto e devolve uma lista das sentencas dentro do texto"""
sentencas = re.split('[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
"""A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca"""
sentenca = re.split('[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
"""A funcao recebe uma frase e devolve uma lista das palavras dentro da frase"""
return frase.split()
def n_palavras_unicas(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez"""
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
"""Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas"""
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases(sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras(frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio(list_palavras):
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i += 1
tam = soma_palavras / len(palavras)
return tam
def type_token(list_palavras):
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras) / len(palavras)
return TT
def hapax_legomana(list_palavras):
palavras = lista_palavras(texto)
HL = n_palavras_unicas(palavras) / len(palavras)
return HL
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i += 1
return soma
def tam_medio_sentenca(lista_sent):
TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(
lista_sent))
return TMS
def frases(sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca(texto):
CS = len(frases(texto)) / len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i += 1
return soma_fr
def tam_medio_frase(lista_frases):
TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))
return TMF
def le_textos():
"""A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento"""
i = 1
textos = []
texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')
while texto:
textos.append(texto)
i += 1
texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'
)
return textos
def compara_assinatura(as_a, as_b):
"""IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas."""
i = 0
soma = 0
for i in range(6):
soma += abs(as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
lista.append(tam_medio(texto))
lista.append(type_token(texto))
lista.append(hapax_legomana(texto))
lista.append(tam_medio_sentenca(texto))
lista.append(complexidade_sentenca(texto))
lista.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
"""IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH."""
lista_sab = []
menor = 0
for texto in textos:
as_texto = calcula_assinatura(texto)
comparar = compara_assinatura(ass_cp, as_texto)
lista_sab.append(comparar)
menor = min(lista_sab)
return lista.index(menor) + 1
| # -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 20:24:53 2020
@author: filip
"""
import re
texto = "Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia."
texto1 = ['Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido', ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia']
def separa_sentencas(texto):
'''A funcao recebe um texto e devolve uma lista das sentencas dentro do texto'''
sentencas = re.split(r'[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
'''A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca'''
sentenca = re.split(r'[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
'''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''
return frase.split()
def n_palavras_unicas(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez'''
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas'''
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases (sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras (frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio (list_palavras): # Traço linguístico 1
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i +=1
tam = soma_palavras/len(palavras)
return tam
def type_token(list_palavras): # Traço linguístico 2
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras)/ len(palavras)
return TT
def hapax_legomana (list_palavras): # Traço linguístico 3
palavras = lista_palavras(texto)
HL = n_palavras_unicas(palavras)/ len(palavras)
return HL
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i +=1
return soma
def tam_medio_sentenca(lista_sent): # Traço linguístico 4
TMS = soma_caracteres_sentenca(lista_sent)/ len(separa_sentencas(lista_sent))
return TMS
def frases (sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca (texto): # Traço linguístico 5
CS = len(frases(texto))/ len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i +=1
return soma_fr
def tam_medio_frase(lista_frases): # Traço linguístico 6
TMF = soma_caracteres_frases(lista_frases)/ len (frases(lista_frases))
return TMF
def le_textos():
'''A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento'''
i = 1
textos = []
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
while texto:
textos.append(texto)
i += 1
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
return textos
def compara_assinatura(as_a, as_b):
'''IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.'''
i = 0
soma = 0
for i in range(6):
soma += abs (as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
lista.append(tam_medio(texto))
lista.append(type_token(texto))
lista.append(hapax_legomana (texto))
lista.append(tam_medio_sentenca(texto))
lista.append(complexidade_sentenca (texto))
lista.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
'''IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.'''
lista_sab = []
menor = 0
for texto in textos:
as_texto = calcula_assinatura(texto)
comparar = compara_assinatura(ass_cp, as_texto)
lista_sab.append(comparar)
menor = min(lista_sab)
return (lista.index(menor) + 1)
| [
6,
17,
19,
22,
23
] |
1,002 | 5d7080f2778133d1938853512ca038edcf7c0dc4 | <mask token>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='[email protected]', username='customer1', password='customer1')
cru(email='[email protected]', username='customer2', password='customer2')
cru(email='[email protected]', username='vendor1', password='vendor1')
cru(email='[email protected]', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<mask token>
| <mask token>
class TicketshopLayer(PloneSandboxLayer):
<mask token>
<mask token>
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<mask token>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='[email protected]', username='customer1', password='customer1')
cru(email='[email protected]', username='customer2', password='customer2')
cru(email='[email protected]', username='vendor1', password='vendor1')
cru(email='[email protected]', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<mask token>
| <mask token>
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<mask token>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='[email protected]', username='customer1', password='customer1')
cru(email='[email protected]', username='customer2', password='customer2')
cru(email='[email protected]', username='vendor1', password='vendor1')
cru(email='[email protected]', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<mask token>
| <mask token>
if getFSVersionTuple()[0] >= 5:
PLONE5 = 1
else:
PLONE5 = 0
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
<mask token>
class TicketshopATLayer(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes, context=
configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop, context=
configurationContext)
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain('one_state_workflow')
setRoles(portal, TEST_USER_ID, ['Manager'])
cru = plone.api.user.create
cru(email='[email protected]', username='customer1', password='customer1')
cru(email='[email protected]', username='customer2', password='customer2')
cru(email='[email protected]', username='vendor1', password='vendor1')
cru(email='[email protected]', username='vendor2', password='vendor2')
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title='item_11')
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title='item_12')
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title='item_21')
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title='item_22')
<mask token>
| from Products.CMFPlone.utils import getFSVersionTuple
from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
from plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import z2
from zope.interface import alsoProvides
import plone.api
if getFSVersionTuple()[0] >= 5:
PLONE5 = 1
else:
PLONE5 = 0
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
# Uninstall old-style Products
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
Ticketshop_FIXTURE = TicketshopLayer()
Ticketshop_INTEGRATION_TESTING = IntegrationTesting(
bases=(Ticketshop_FIXTURE,),
name="Ticketshop:Integration")
class TicketshopATLayer(PloneSandboxLayer):
# don't use shop fixture here. looks like, test layers use differen ZODB
# connections and c.z.datagriedfield fails with a ZODB object reference
# error.
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes,
context=configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain("one_state_workflow")
setRoles(portal, TEST_USER_ID, ['Manager'])
# Create test users
cru = plone.api.user.create
cru(email="[email protected]", username="customer1", password="customer1")
cru(email="[email protected]", username="customer2", password="customer2")
cru(email="[email protected]", username="vendor1", password="vendor1")
cru(email="[email protected]", username="vendor2", password="vendor2")
# Create test content
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title="item_11")
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title="item_12")
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title="item_21")
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title="item_22")
TicketshopAT_FIXTURE = TicketshopATLayer()
TicketshopAT_INTEGRATION_TESTING = IntegrationTesting(
bases=(TicketshopAT_FIXTURE,),
name="TicketshopAT:Integration")
TicketshopAT_ROBOT_TESTING = FunctionalTesting(
bases=(
MOCK_MAILHOST_FIXTURE,
TicketshopAT_FIXTURE,
z2.ZSERVER_FIXTURE
),
name="TicketshopAT:Robot")
| [
4,
7,
10,
11,
14
] |
1,003 | 646f6a0afc3dc129250c26270dda4355b8cea080 | <mask token>
| <mask token>
def problem127():
GOAL = 120000
rad = {}
for primes in genFactors(GOAL):
rad[product(primes)] = set(primes), product(set(primes))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
<mask token>
| <mask token>
def problem127():
GOAL = 120000
rad = {}
for primes in genFactors(GOAL):
rad[product(primes)] = set(primes), product(set(primes))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
if __name__ == '__main__':
print(problem127() == 18407904)
| <mask token>
from PE_factors import genFactors
from PE_basic import product
def problem127():
GOAL = 120000
rad = {}
for primes in genFactors(GOAL):
rad[product(primes)] = set(primes), product(set(primes))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
if __name__ == '__main__':
print(problem127() == 18407904)
| #!/usr/local/bin/python3.3
'''
http://projecteuler.net/problem=127()
abc-hits
Problem 127
The radical of n, rad(n), is the product of distinct prime factors of n. For example, 504 = 23 × 32 × 7, so rad(504) = 2 × 3 × 7 = 42.
We shall define the triplet of positive integers (a, b, c) to be an abc-hit if:
GCD(a, b) = GCD(a, c) = GCD(b, c) = 1
a < b
a + b = c
rad(abc) < c
For example, (5, 27, 32) is an abc-hit, because:
GCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1
5 < 27
5 + 27 = 32
rad(4320) = 30 < 32
It turns out that abc-hits are quite rare and there are only thirty-one abc-hits for c < 1000, with ∑c = 12523.
Find ∑c for c < 120000.
'''
'''
Notes on problem 127():
Very slow
'''
from PE_factors import genFactors
from PE_basic import product
def problem127():
GOAL = 120000
rad = {} # rad[6] = {2,3}, radn[8] = {2}
for primes in genFactors(GOAL):
rad[product(primes)] = (set(primes), product(set(primes)))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
if __name__ == "__main__":
print(problem127() == 18407904)
| [
0,
1,
2,
3,
4
] |
1,004 | 3079fdbe6319454ad166d06bda5670554a5746ee | <mask token>
| <mask token>
print(a)
<mask token>
print(b)
<mask token>
print(c)
<mask token>
print(d)
| a = len('Karen')
print(a)
b = 'Rainha Elizabeth'.count('a')
print(b)
c = 'karen nayara'.replace('a', '@')
print(c)
d = 'karen meeseeks gomes'.split()
print(d)
| # len(): tamanho da string
# count(): conta quantas vezes um caractere aparece
# lower(), upper()
# replace(): substitui as letras por outra
# split(): quebra uma string a partir dos espacos em branco
a = len('Karen')
print(a)
b = 'Rainha Elizabeth'.count('a')
print(b)
c = 'karen nayara'.replace('a','@')
print(c)
d = 'karen meeseeks gomes'.split()
print(d) | null | [
0,
1,
2,
3
] |
1,005 | 2cdcd6976a1ec99b927adcedc48c36bbda1b4e18 | """ Generate test pads for padder. """
# usage: python gen.py > pads.txt
import random
pad = ""
count = 0
# The pad chars MUST match the character set used by padder.
# See the 'characters' variable in 'main.hpp' for more
# information.
chars = "abcdefghijklmnopqrstuvwxyz0123456789-"
print "#", "Pad"
while count < 12:
for x in xrange(0, 98):
pad += random.choice(chars)
count = count+1
print count, pad
pad = ""
| null | null | null | null | [
0
] |
1,006 | d68bd9c90a106a9eac767607ad77bdd84d0f18d2 | <mask token>
@app.route('/')
def hello_world():
return '你好'
@app.route('/test1/<name>')
def test1(name):
return '你好,%s' % name
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d' % id
<mask token>
@app.route('/test/register')
def register():
return render_template('test/register.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template('test/result.html', result=result)
<mask token>
| <mask token>
@app.route('/')
def hello_world():
return '你好'
@app.route('/test1/<name>')
def test1(name):
return '你好,%s' % name
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d' % id
@app.route('/index1')
def index2():
time = datetime.date.today()
name = ['小新', '小英', '小红']
task = {'任务': '打扫卫生', '时间': '3小时'}
return render_template('index.html', var=time, list=name, task=task)
@app.route('/test/register')
def register():
return render_template('test/register.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template('test/result.html', result=result)
<mask token>
| <mask token>
@app.route('/')
def hello_world():
return '你好'
@app.route('/test1/<name>')
def test1(name):
return '你好,%s' % name
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d' % id
@app.route('/index1')
def index2():
time = datetime.date.today()
name = ['小新', '小英', '小红']
task = {'任务': '打扫卫生', '时间': '3小时'}
return render_template('index.html', var=time, list=name, task=task)
@app.route('/test/register')
def register():
return render_template('test/register.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template('test/result.html', result=result)
if __name__ == '__main__':
app.run(debug=True)
| import time
import datetime
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def hello_world():
return '你好'
@app.route('/test1/<name>')
def test1(name):
return '你好,%s' % name
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d' % id
@app.route('/index1')
def index2():
time = datetime.date.today()
name = ['小新', '小英', '小红']
task = {'任务': '打扫卫生', '时间': '3小时'}
return render_template('index.html', var=time, list=name, task=task)
@app.route('/test/register')
def register():
return render_template('test/register.html')
@app.route('/result', methods=['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template('test/result.html', result=result)
if __name__ == '__main__':
app.run(debug=True)
| #-*- coding = utf-8-*-
#@Time : 2020/6/26 11:02
#@Author :Ella
#@File :app.py
#@Software : PyCharm
import time
import datetime
from flask import Flask,render_template,request #render_template渲染模板
app = Flask(__name__) #初始化的对象
#路由解析,通过用户访问的路径,匹配想要的函数
@app.route('/')
def hello_world():
return '你好'
#通过访问路径,获取用户的字符串参数
@app.route('/test1/<name>')
def test1(name):
return '你好,%s'%name
#通过访问路径,获取用户的整形参数 此外,还有float类型
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d'%id
#返回给用户渲染后的网页文件
# @app.route('/index1')
# def index1():
# return render_template("index.html")
#向页面传递变量
@app.route('/index1')
def index2():
time = datetime.date.today() #普通变量
name = ['小新','小英','小红'] #列表类型
task = {"任务":"打扫卫生","时间":"3小时"} #字典类型
return render_template("index.html",var = time,list = name,task = task)
#表单提交
@app.route('/test/register')
def register():
return render_template("test/register.html")
#接受表单提交的路由,需要指定methods为post
@app.route('/result',methods = ['POST','GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template("test/result.html",result = result)
if __name__ == '__main__':
app.run(debug=True) | [
5,
6,
7,
9,
10
] |
1,007 | 6da828a797efac7c37723db96a2682e960c317b5 | <mask token>
| <mask token>
def readme():
with open('README.rst') as f:
return f.read()
<mask token>
| <mask token>
def readme():
with open('README.rst') as f:
return f.read()
setup(name='keputils', version='0.2.1', description=
'Basic module for interaction with KOI and Kepler-stellar tables.',
long_description=readme(), author='Timothy D. Morton', author_email=
'[email protected]', url=
'https://github.com/timothydmorton/keputils', packages=['keputils'],
scripts=['scripts/koiquery'], classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[
'pandas>=0.13', 'simpledist'], zip_safe=False)
| from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='keputils', version='0.2.1', description=
'Basic module for interaction with KOI and Kepler-stellar tables.',
long_description=readme(), author='Timothy D. Morton', author_email=
'[email protected]', url=
'https://github.com/timothydmorton/keputils', packages=['keputils'],
scripts=['scripts/koiquery'], classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[
'pandas>=0.13', 'simpledist'], zip_safe=False)
| from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name = "keputils",
version = "0.2.1",
description = "Basic module for interaction with KOI and Kepler-stellar tables.",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "[email protected]",
url = "https://github.com/timothydmorton/keputils",
packages = ['keputils'],
scripts = ['scripts/koiquery'],
#entry_points = {'console_scripts' : ['koiquery = koiquery:main']},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.13','simpledist'],
zip_safe=False
)
| [
0,
1,
2,
3,
4
] |
1,008 | 9c6bb885c05ee13a283b09861a5aa7c5e62677cb | #!/usr/bin/python
def check(n):
if n == 0 :
print "neither Positive nor Negative"
if n < 0 :
print "Negative"
if n > 0 :
print "Positive"
print "10 is ", check(10)
print "-5 is ", check(-5)
print "0 is ", check(0) | null | null | null | null | [
0
] |
1,009 | 93e8e9fc4f0503dfc3243bef5ab8261a4cdfc296 | <mask token>
class Version(object):
<mask token>
<mask token>
<mask token>
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<mask token>
| <mask token>
class Version(object):
<mask token>
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<mask token>
| <mask token>
class Version(object):
"""
Software management class
"""
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<mask token>
| <mask token>
try:
input = raw_input
except NameError:
pass
class Version(object):
"""
Software management class
"""
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
if __name__ == '__main__':
Version.run()
| #!/usr/bin/env python
# encoding: UTF-8
'''
Script to select current version for a given soft (python, ruby or java).
'''
import os
import re
import sys
import glob
import getopt
# fix input in Python 2 and 3
try:
input = raw_input # pylint: disable=redefined-builtin,invalid-name
except NameError:
pass
class Version(object): # pylint: disable=useless-object-inheritance
'''
Software management class
'''
HELP = '''version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose.'''
SELECTED = ' *'
def __init__(self, soft):
'''
Constructor that takes software name
'''
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root+'/[0-9]*'))
self.versions = [v[len(self.soft_root)+1:] for v in self.soft_paths]
path = os.path.realpath("%s/current" % self.soft_root)
self.current_version = path[path.rindex(os.path.sep)+1:]
def set_version(self, index):
'''
Set software version by index
'''
sudo = 'sudo ' if self.sudo else ''
old_dir = "current"
if index == -1:
print("Selecting system version")
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" %
(self.soft, self.versions[index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
os.system("cd %s && %sln -s %s %s" % (self.soft_root, sudo, directory, old_dir))
def ask_version(self):
'''
Prompt user for software version in the list of installed versions
'''
# print version list
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print("0: System"+selected)
for version in self.soft_paths:
number = version[len(self.soft_root)+1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index)+': '+str(number)+selected)
index += 1
# ask for the version
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print("\nUser abort!")
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and \
int(choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print("Keeping current")
sys.exit(0)
else:
print("Bad version, please choose a number between 0 and %s" %
str(maximum))
# return index in version table
return index
@staticmethod
def run():
'''
Read software name on command line and run version selection
'''
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" % option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print("Error parsing command line: You must pass software")
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
if __name__ == '__main__':
Version.run()
| [
5,
6,
7,
8,
10
] |
1,010 | 55c2bf914a77c573d1b6835f54c82921d9fa6ad6 | <mask token>
| <mask token>
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
<mask token>
| <mask token>
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
Try(main)
| from ED63RDScenarioHelper import *
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
Try(main)
| from ED63RDScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'C2219 ._SN',
MapName = 'Ruan',
Location = 'C2219.x',
MapIndex = 84,
MapDefaultBGM = "ed60015",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT21/C2219 ._SN',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Vogt', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01000 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01000P._CP', # 00
)
DeclNpc(
X = -2870,
Z = 0,
Y = 202000,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
ScpFunction(
"Function_0_D2", # 00, 0
"Function_1_D3", # 01, 1
"Function_2_DD", # 02, 2
"Function_3_25A", # 03, 3
"Function_4_AEC", # 04, 4
"Function_5_B4D", # 05, 5
)
def Function_0_D2(): pass
label("Function_0_D2")
Return()
# Function_0_D2 end
def Function_1_D3(): pass
label("Function_1_D3")
OP_B0(0x0, 0x78)
OP_1C(0x0, 0x0, 0x5)
Return()
# Function_1_D3 end
def Function_2_DD(): pass
label("Function_2_DD")
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_102")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_244")
label("loc_102")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_11B")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_244")
label("loc_11B")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_134")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_244")
label("loc_134")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_14D")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_244")
label("loc_14D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_166")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_244")
label("loc_166")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_17F")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_244")
label("loc_17F")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_198")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_244")
label("loc_198")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1B1")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_244")
label("loc_1B1")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1CA")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_244")
label("loc_1CA")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1E3")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_244")
label("loc_1E3")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1FC")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_244")
label("loc_1FC")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_215")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_244")
label("loc_215")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_22E")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_244")
label("loc_22E")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_244")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_244")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_259")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_244")
label("loc_259")
Return()
# Function_2_DD end
def Function_3_25A(): pass
label("Function_3_25A")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), "loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_2B2")
ChrTalk( #0
0xFE,
(
"I reckon my happiness is right here in this\x01",
"lighthouse.\x02",
)
)
CloseMessageWindow()
Jump("loc_34C")
label("loc_2B2")
ChrTalk( #1
0xFE,
(
"There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
"are looking for.\x02",
)
)
CloseMessageWindow()
ChrTalk( #2
0xFE,
"I reckon that's my happiness...\x02",
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_34C")
Jump("loc_6C1")
label("loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), "loc_477")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_3DF")
ChrTalk( #3
0xFE,
(
"There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
"and scream for help if you need it!\x02",
)
)
CloseMessageWindow()
Jump("loc_474")
label("loc_3DF")
ChrTalk( #4
0xFE,
"You lookin' for some help, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #5
0xFE,
"What do you need?\x02",
)
CloseMessageWindow()
ChrTalk( #6
0x14E,
(
"#1714FN-No. I'll be fine, honestly...\x02\x03",
"#1713FThank you for offering, sir.\x02",
)
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_474")
Jump("loc_6C1")
label("loc_477")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk( #7
0xFE,
(
"I swear, this is EXACTLY what's wrong\x01",
"with youngins these days...\x02",
)
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #8
0xFE,
"Wh-What are you doing here, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #9
0x14E,
(
"#1712FU-Umm... Excuse me, sir...\x02\x03",
"You haven't seen a young girl other\x01",
"than me in here recently have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #10
0xFE,
"A young girl? 'Fraid not.\x02",
)
CloseMessageWindow()
ChrTalk( #11
0x14E,
(
"#1713FI-I see...\x02\x03",
"Sorry for troubling you...\x02",
)
)
CloseMessageWindow()
def lambda_639():
label("loc_639")
TurnDirection(0xFE, 0x14E, 0)
OP_48()
Jump("loc_639")
QueueWorkItem2(0x10, 3, lambda_639)
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #12
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #13
0xFE,
"They sure are a pain.\x02",
)
CloseMessageWindow()
OP_A2(0x2F44)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_6C1")
Jump("loc_AE8")
label("loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), "loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_721")
ChrTalk( #14
0xFE,
"A happiness stone, you say?\x02",
)
CloseMessageWindow()
ChrTalk( #15
0xFE,
"You think somethin' like that exists?\x02",
)
CloseMessageWindow()
Jump("loc_ADE")
label("loc_721")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1250, 0, 202480, 270)
SetChrPos(0x14F, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk( #16
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #17
0xFE,
"Wh-What might you two be doing here?\x02",
)
CloseMessageWindow()
ChrTalk( #18
0x14E,
"#1718FHello!\x02",
)
CloseMessageWindow()
OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)
Sleep(500)
OP_63(0x14E)
ChrTalk( #19
0x14E,
(
"#1714FActually, lighthouses are pretty high up,\x01",
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
"have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0xFE,
"A-A happiness stone?!\x02",
)
CloseMessageWindow()
ChrTalk( #21
0x14F,
"#1730FThey're really shiny and pretty!\x02",
)
CloseMessageWindow()
ChrTalk( #22
0xFE,
(
"N-No, I don't recall ever seein' any\x01",
"such thing in all my years...\x02",
)
)
CloseMessageWindow()
ChrTalk( #23
0x14E,
(
"#1716FOh... That's too bad...\x02\x03",
"#1710FWell, thank you, anyway.\x02",
)
)
CloseMessageWindow()
TurnDirection(0x14E, 0x14F, 400)
Sleep(400)
ChrTalk( #24
0x14E,
"#1718FLet's keep looking, Polly! \x02",
)
CloseMessageWindow()
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(2000)
ChrTalk( #25
0x14F,
"#1731FI hope your back feels better, mister!\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
def lambda_A1A():
label("loc_A1A")
TurnDirection(0xFE, 0x14F, 0)
OP_48()
Jump("loc_A1A")
QueueWorkItem2(0x10, 3, lambda_A1A)
OP_43(0x14F, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #26
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #27
0xFE,
"...They're sharp little devils, aren't they?\x02",
)
CloseMessageWindow()
Sleep(500)
ChrTalk( #28
0xFE,
"A happiness stone, hmm...?\x02",
)
CloseMessageWindow()
OP_A2(0x2F43)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_ADE")
Jump("loc_AE8")
label("loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), "loc_AE8")
label("loc_AE8")
TalkEnd(0xFE)
Return()
# Function_3_25A end
def Function_4_AEC(): pass
label("Function_4_AEC")
def lambda_AF2():
OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_AF2)
WaitChrThread(0xFE, 0x1)
def lambda_B12():
OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B12)
WaitChrThread(0xFE, 0x1)
def lambda_B32():
OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B32)
WaitChrThread(0xFE, 0x1)
Return()
# Function_4_AEC end
def Function_5_B4D(): pass
label("Function_5_B4D")
TalkBegin(0xFF)
TalkEnd(0xFF)
Return()
# Function_5_B4D end
SaveToFile()
Try(main)
| [
0,
1,
2,
3,
4
] |
1,011 | ecbca04a58c19469e63ee2310e2b2f6b86c41199 | <mask token>
class CSV_Normalize:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def get_input(self):
return list(zip(self.close_prices, self.high_prices, self.
prev_prices, self.sentiments))
def get_nomralized_input(self):
return list(zip(self.normalized_close, self.normalized_high, self.
normalized_prev, self.sentiments))
def set_output(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.open_prices.append(row[2])
self.open_prices = self.open_prices[2:]
for m in range(len(self.open_prices)):
self.open_prices[m] = float(self.open_prices[m])
for i in range(len(self.open_prices)):
if self.open_prices[i] > self.max_open:
self.max_open = self.open_prices[i]
if self.open_prices[i] < self.min_open:
self.min_open = self.open_prices[i]
def set_normalized_output(self):
self.set_output()
for i1 in range(len(self.open_prices)):
self.normalized_open.append((self.open_prices[i1] - self.
min_open) / (self.max_open - self.min_open))
<mask token>
def get_normalized_output(self):
return self.normalized_open
<mask token>
def get_training_input(self):
self.set_training_input()
return self.training_inputs
def set_training_input(self):
for i in range(len(self.normalized_close)):
temp_list = [self.normalized_close[i], self.normalized_high[i],
self.normalized_prev[i], self.normalized_sent[i]]
self.inputs.append(temp_list)
train_end = int(0.7 * len(self.inputs))
self.training_inputs = self.inputs[0:train_end]
def get_testing_input(self):
self.set_testing_input()
return self.testing_inputs
<mask token>
def set_testing_input(self):
train_end = int(0.7 * len(self.inputs))
self.testing_inputs = self.inputs[train_end:]
<mask token>
<mask token>
def set_testing_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.testing_outputs = self.normalized_open[train_end:]
<mask token>
| <mask token>
class CSV_Normalize:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def set_normalized_input(self):
if self.max_prev == 0:
self.set_input()
for i1 in range(len(self.close_prices)):
self.normalized_close.append((self.close_prices[i1] - self.
min_close) / (self.max_close - self.min_close))
for i2 in range(len(self.high_prices)):
self.normalized_high.append((self.high_prices[i2] - self.
min_high) / (self.max_high - self.min_high))
for i4 in range(len(self.prev_prices)):
self.normalized_prev.append((self.prev_prices[i4] - self.
min_prev) / (self.max_prev - self.min_prev))
for i5 in range(len(self.sentiments)):
diff = self.max_sent - self.min_sent
if diff == 0:
self.normalized_sent.append(0)
else:
self.normalized_sent.append((self.sentiments[i5] - self.
min_sent) / (self.max_sent - self.min_sent))
def get_input(self):
return list(zip(self.close_prices, self.high_prices, self.
prev_prices, self.sentiments))
def get_nomralized_input(self):
return list(zip(self.normalized_close, self.normalized_high, self.
normalized_prev, self.sentiments))
def set_output(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.open_prices.append(row[2])
self.open_prices = self.open_prices[2:]
for m in range(len(self.open_prices)):
self.open_prices[m] = float(self.open_prices[m])
for i in range(len(self.open_prices)):
if self.open_prices[i] > self.max_open:
self.max_open = self.open_prices[i]
if self.open_prices[i] < self.min_open:
self.min_open = self.open_prices[i]
def set_normalized_output(self):
self.set_output()
for i1 in range(len(self.open_prices)):
self.normalized_open.append((self.open_prices[i1] - self.
min_open) / (self.max_open - self.min_open))
<mask token>
def get_normalized_output(self):
return self.normalized_open
<mask token>
def get_training_input(self):
self.set_training_input()
return self.training_inputs
def set_training_input(self):
for i in range(len(self.normalized_close)):
temp_list = [self.normalized_close[i], self.normalized_high[i],
self.normalized_prev[i], self.normalized_sent[i]]
self.inputs.append(temp_list)
train_end = int(0.7 * len(self.inputs))
self.training_inputs = self.inputs[0:train_end]
def get_testing_input(self):
self.set_testing_input()
return self.testing_inputs
<mask token>
def set_testing_input(self):
train_end = int(0.7 * len(self.inputs))
self.testing_inputs = self.inputs[train_end:]
<mask token>
<mask token>
def set_testing_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.testing_outputs = self.normalized_open[train_end:]
<mask token>
| <mask token>
class CSV_Normalize:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def set_stock(self, stock):
self.stock = stock
def set_input(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.close_prices.append(row[5])
self.high_prices.append(row[3])
self.prev_prices.append(row[2])
self.sentiments.append(row[7])
self.close_prices = self.close_prices[1:-1]
self.high_prices = self.high_prices[1:-1]
self.prev_prices = self.prev_prices[1:-1]
self.sentiments = self.sentiments[1:-1]
for m in range(len(self.close_prices)):
if self.close_prices[m] != 'Close':
self.close_prices[m] = float(self.close_prices[m])
for n in range(len(self.high_prices)):
if self.high_prices[n] != 'High':
self.high_prices[n] = float(self.high_prices[n])
for pp in range(len(self.prev_prices)):
if self.prev_prices[pp] != 'Open':
self.prev_prices[pp] = float(self.prev_prices[pp])
for p in range(len(self.close_prices)):
if self.close_prices[m] != 'Close':
if self.close_prices[p] > self.max_close:
self.max_close = self.close_prices[p]
if self.close_prices[p] < self.min_close:
self.min_close = self.close_prices[p]
for q in range(len(self.high_prices)):
if self.high_prices[q] > self.max_high:
self.max_high = self.high_prices[q]
if self.high_prices[q] < self.min_high:
self.min_high = self.high_prices[q]
for s in range(len(self.prev_prices)):
if self.prev_prices[s] > self.max_prev:
self.max_prev = self.prev_prices[s]
if self.prev_prices[s] < self.min_prev:
self.min_prev = self.prev_prices[s]
for s in range(len(self.sentiments)):
self.sentiments[s] = float(self.sentiments[s])
if self.max_sent > self.max_sent:
self.max_sent = self.sentiments[s]
if self.sentiments[s] < self.min_sent:
self.min_sent = self.sentiments[s]
def set_normalized_input(self):
if self.max_prev == 0:
self.set_input()
for i1 in range(len(self.close_prices)):
self.normalized_close.append((self.close_prices[i1] - self.
min_close) / (self.max_close - self.min_close))
for i2 in range(len(self.high_prices)):
self.normalized_high.append((self.high_prices[i2] - self.
min_high) / (self.max_high - self.min_high))
for i4 in range(len(self.prev_prices)):
self.normalized_prev.append((self.prev_prices[i4] - self.
min_prev) / (self.max_prev - self.min_prev))
for i5 in range(len(self.sentiments)):
diff = self.max_sent - self.min_sent
if diff == 0:
self.normalized_sent.append(0)
else:
self.normalized_sent.append((self.sentiments[i5] - self.
min_sent) / (self.max_sent - self.min_sent))
def get_input(self):
return list(zip(self.close_prices, self.high_prices, self.
prev_prices, self.sentiments))
def get_nomralized_input(self):
return list(zip(self.normalized_close, self.normalized_high, self.
normalized_prev, self.sentiments))
def set_output(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.open_prices.append(row[2])
self.open_prices = self.open_prices[2:]
for m in range(len(self.open_prices)):
self.open_prices[m] = float(self.open_prices[m])
for i in range(len(self.open_prices)):
if self.open_prices[i] > self.max_open:
self.max_open = self.open_prices[i]
if self.open_prices[i] < self.min_open:
self.min_open = self.open_prices[i]
def set_normalized_output(self):
self.set_output()
for i1 in range(len(self.open_prices)):
self.normalized_open.append((self.open_prices[i1] - self.
min_open) / (self.max_open - self.min_open))
<mask token>
def get_normalized_output(self):
return self.normalized_open
def inverse(self, normalized):
return normalized * (self.max_open - self.min_open) + self.min_open
def get_training_input(self):
self.set_training_input()
return self.training_inputs
def set_training_input(self):
for i in range(len(self.normalized_close)):
temp_list = [self.normalized_close[i], self.normalized_high[i],
self.normalized_prev[i], self.normalized_sent[i]]
self.inputs.append(temp_list)
train_end = int(0.7 * len(self.inputs))
self.training_inputs = self.inputs[0:train_end]
def get_testing_input(self):
self.set_testing_input()
return self.testing_inputs
def get_training_output(self):
self.set_training_output()
return self.training_outputs
def set_testing_input(self):
train_end = int(0.7 * len(self.inputs))
self.testing_inputs = self.inputs[train_end:]
def set_training_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.training_outputs = self.normalized_open[0:train_end]
def get_testing_output(self):
self.set_testing_output()
return self.testing_outputs
def set_testing_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.testing_outputs = self.normalized_open[train_end:]
def clear_lists(self):
self.close_prices.clear()
self.high_prices.clear()
self.prev_prices.clear()
self.normalized_close.clear()
self.normalized_high.clear()
self.normalized_prev.clear()
self.open_prices.clear()
self.normalized_open.clear()
self.inputs.clear()
self.training_inputs.clear()
self.testing_inputs.clear()
self.training_outputs.clear()
self.testing_outputs.clear()
self.sentiments.clear()
self.normalized_sent = []
self.max_sent = 0.0
self.min_sent = 0.0
self.min_close = 1000
self.max_close = 0
self.min_high = 1000
self.max_high = 0
self.min_prev = 1000
self.max_prev = 0
self.min_open = 1000
self.max_open = 0
| <mask token>
class CSV_Normalize:
stock = ''
close_prices = []
high_prices = []
prev_prices = []
sentiments = []
max_sent = 0.0
min_sent = 0.0
min_close = 1000
max_close = 0
min_high = 1000
max_high = 0
min_prev = 1000
max_prev = 0
normalized_close = []
normalized_high = []
normalized_prev = []
normalized_sent = []
open_prices = []
min_open = 1000
max_open = 0
normalized_open = []
inputs = []
training_inputs = []
testing_inputs = []
training_outputs = []
testing_outputs = []
def set_stock(self, stock):
self.stock = stock
def set_input(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.close_prices.append(row[5])
self.high_prices.append(row[3])
self.prev_prices.append(row[2])
self.sentiments.append(row[7])
self.close_prices = self.close_prices[1:-1]
self.high_prices = self.high_prices[1:-1]
self.prev_prices = self.prev_prices[1:-1]
self.sentiments = self.sentiments[1:-1]
for m in range(len(self.close_prices)):
if self.close_prices[m] != 'Close':
self.close_prices[m] = float(self.close_prices[m])
for n in range(len(self.high_prices)):
if self.high_prices[n] != 'High':
self.high_prices[n] = float(self.high_prices[n])
for pp in range(len(self.prev_prices)):
if self.prev_prices[pp] != 'Open':
self.prev_prices[pp] = float(self.prev_prices[pp])
for p in range(len(self.close_prices)):
if self.close_prices[m] != 'Close':
if self.close_prices[p] > self.max_close:
self.max_close = self.close_prices[p]
if self.close_prices[p] < self.min_close:
self.min_close = self.close_prices[p]
for q in range(len(self.high_prices)):
if self.high_prices[q] > self.max_high:
self.max_high = self.high_prices[q]
if self.high_prices[q] < self.min_high:
self.min_high = self.high_prices[q]
for s in range(len(self.prev_prices)):
if self.prev_prices[s] > self.max_prev:
self.max_prev = self.prev_prices[s]
if self.prev_prices[s] < self.min_prev:
self.min_prev = self.prev_prices[s]
for s in range(len(self.sentiments)):
self.sentiments[s] = float(self.sentiments[s])
if self.max_sent > self.max_sent:
self.max_sent = self.sentiments[s]
if self.sentiments[s] < self.min_sent:
self.min_sent = self.sentiments[s]
def set_normalized_input(self):
if self.max_prev == 0:
self.set_input()
for i1 in range(len(self.close_prices)):
self.normalized_close.append((self.close_prices[i1] - self.
min_close) / (self.max_close - self.min_close))
for i2 in range(len(self.high_prices)):
self.normalized_high.append((self.high_prices[i2] - self.
min_high) / (self.max_high - self.min_high))
for i4 in range(len(self.prev_prices)):
self.normalized_prev.append((self.prev_prices[i4] - self.
min_prev) / (self.max_prev - self.min_prev))
for i5 in range(len(self.sentiments)):
diff = self.max_sent - self.min_sent
if diff == 0:
self.normalized_sent.append(0)
else:
self.normalized_sent.append((self.sentiments[i5] - self.
min_sent) / (self.max_sent - self.min_sent))
def get_input(self):
return list(zip(self.close_prices, self.high_prices, self.
prev_prices, self.sentiments))
def get_nomralized_input(self):
return list(zip(self.normalized_close, self.normalized_high, self.
normalized_prev, self.sentiments))
def set_output(self):
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
self.open_prices.append(row[2])
self.open_prices = self.open_prices[2:]
for m in range(len(self.open_prices)):
self.open_prices[m] = float(self.open_prices[m])
for i in range(len(self.open_prices)):
if self.open_prices[i] > self.max_open:
self.max_open = self.open_prices[i]
if self.open_prices[i] < self.min_open:
self.min_open = self.open_prices[i]
def set_normalized_output(self):
self.set_output()
for i1 in range(len(self.open_prices)):
self.normalized_open.append((self.open_prices[i1] - self.
min_open) / (self.max_open - self.min_open))
def get_output(self):
return self.open_prices
def get_normalized_output(self):
return self.normalized_open
def inverse(self, normalized):
return normalized * (self.max_open - self.min_open) + self.min_open
def get_training_input(self):
self.set_training_input()
return self.training_inputs
def set_training_input(self):
for i in range(len(self.normalized_close)):
temp_list = [self.normalized_close[i], self.normalized_high[i],
self.normalized_prev[i], self.normalized_sent[i]]
self.inputs.append(temp_list)
train_end = int(0.7 * len(self.inputs))
self.training_inputs = self.inputs[0:train_end]
def get_testing_input(self):
self.set_testing_input()
return self.testing_inputs
def get_training_output(self):
self.set_training_output()
return self.training_outputs
def set_testing_input(self):
train_end = int(0.7 * len(self.inputs))
self.testing_inputs = self.inputs[train_end:]
def set_training_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.training_outputs = self.normalized_open[0:train_end]
def get_testing_output(self):
self.set_testing_output()
return self.testing_outputs
def set_testing_output(self):
train_end = int(0.7 * len(self.normalized_open))
self.testing_outputs = self.normalized_open[train_end:]
def clear_lists(self):
self.close_prices.clear()
self.high_prices.clear()
self.prev_prices.clear()
self.normalized_close.clear()
self.normalized_high.clear()
self.normalized_prev.clear()
self.open_prices.clear()
self.normalized_open.clear()
self.inputs.clear()
self.training_inputs.clear()
self.testing_inputs.clear()
self.training_outputs.clear()
self.testing_outputs.clear()
self.sentiments.clear()
self.normalized_sent = []
self.max_sent = 0.0
self.min_sent = 0.0
self.min_close = 1000
self.max_close = 0
self.min_high = 1000
self.max_high = 0
self.min_prev = 1000
self.max_prev = 0
self.min_open = 1000
self.max_open = 0
| # -*- coding: utf-8 -*-
"""
Created on Wed May 8 15:05:51 2019
@author: Brian Heckman and Kyle Oprisko
"""
import csv
"""this file opens a csv file created in the csv creator class. The main purpose of this class is to
normalize the data in the csv file, so that it can be read by the neural network.
"""
class CSV_Normalize:
stock = ""
# Initialize the lists for the 4 parameters
close_prices = []
high_prices = []
prev_prices = []
sentiments = []
# Initialize max and min values for normalization calc
max_sent = 0.0
min_sent = 0.0
min_close = 1000
max_close = 0
min_high = 1000
max_high = 0
min_prev = 1000
max_prev = 0
# Initialize lists for normalized values of parameters
normalized_close = []
normalized_high = []
normalized_prev = []
normalized_sent = []
# Initialize output parameters
open_prices = []
# Initialize max and min for normalization calc
min_open= 1000
max_open = 0
# Initialize the normalized output list
normalized_open = []
# Create arrays to separate into training and testing lists
inputs = []
training_inputs = []
testing_inputs = []
training_outputs = []
testing_outputs = []
# Set name of stock
def set_stock(self,stock):
self.stock = stock
# Set input values
def set_input(self):
# Open CSV and read each row and append to specific list
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter = ',')
for row in readCSV:
self.close_prices.append(row[5])
self.high_prices.append(row[3])
self.prev_prices.append(row[2])
self.sentiments.append(row[7])
# Remove the headers and the last row because the data is trailing
self.close_prices = self.close_prices[1:-1]
self.high_prices = self.high_prices[1:-1]
self.prev_prices = self.prev_prices[1:-1]
self.sentiments = self.sentiments[1:-1]
# Turn data values into floats
for m in range(len(self.close_prices)):
if self.close_prices[m] != "Close":
self.close_prices[m] = float(self.close_prices[m])
for n in range(len(self.high_prices)):
if self.high_prices[n] != "High":
self.high_prices[n] = float(self.high_prices[n])
for pp in range(len(self.prev_prices)):
if self.prev_prices[pp] != "Open":
self.prev_prices[pp] = float(self.prev_prices[pp])
#Set Min and Max values for normalization
for p in range(len(self.close_prices)):
if self.close_prices[m] != "Close":
if (self.close_prices[p] > self.max_close):
self.max_close = self.close_prices[p]
if (self.close_prices[p] < self.min_close):
self.min_close = self.close_prices[p]
for q in range(len(self.high_prices)):
if (self.high_prices[q] > self.max_high):
self.max_high = self.high_prices[q]
if (self.high_prices[q] < self.min_high):
self.min_high = self.high_prices[q]
for s in range(len(self.prev_prices)):
if (self.prev_prices[s] > self.max_prev):
self.max_prev = self.prev_prices[s]
if (self.prev_prices[s] < self.min_prev):
self.min_prev = self.prev_prices[s]
for s in range(len(self.sentiments)):
self.sentiments[s] = float(self.sentiments[s])
if (self.max_sent > self.max_sent):
self.max_sent = self.sentiments[s]
if (self.sentiments[s] < self.min_sent):
self.min_sent = self.sentiments[s]
# Perform normalization calculation and set normalized inputs
def set_normalized_input(self):
# Call set_input function in case it was not called already
if (self.max_prev == 0):
self.set_input()
# Perform normalization calculation under the normalized_x = (x - min)/(max - min) model
for i1 in range(len(self.close_prices)):
self.normalized_close.append((self.close_prices[i1] - self.min_close)/(self.max_close - self.min_close))
for i2 in range(len(self.high_prices)):
self.normalized_high.append((self.high_prices[i2] - self.min_high)/(self.max_high - self.min_high))
for i4 in range(len(self.prev_prices)):
self.normalized_prev.append((self.prev_prices[i4] - self.min_prev)/(self.max_prev - self.min_prev))
for i5 in range(len(self.sentiments)):
diff = self.max_sent - self.min_sent
if diff == 0:
self.normalized_sent.append(0)
else:
self.normalized_sent.append((self.sentiments[i5] - self.min_sent)/(self.max_sent - self.min_sent))
# Organize the input into a zipped list
def get_input(self):
return (list(zip(self.close_prices,self.high_prices,self.prev_prices,self.sentiments)))
# Organize the normalized input into a zipped list
def get_nomralized_input(self):
return (list(zip(self.normalized_close,self.normalized_high,self.normalized_prev,self.sentiments)))
# Set the output data
def set_output(self):
# Open and read the output file and append the list
with open(self.stock + '.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter = ',')
for row in readCSV:
self.open_prices.append(row[2])
# Remove the first two rows (header and first data point)
self.open_prices = self.open_prices[2:]
#
for m in range(len(self.open_prices)):
self.open_prices[m] = float(self.open_prices[m])
for i in range(len(self.open_prices)):
if (self.open_prices[i] > self.max_open):
self.max_open = self.open_prices[i]
if (self.open_prices[i] < self.min_open):
self.min_open = self.open_prices[i]
#uses min max function
def set_normalized_output(self):
self.set_output()
for i1 in range(len(self.open_prices)):
self.normalized_open.append((self.open_prices[i1] - self.min_open)/(self.max_open - self.min_open))
#returns open_prices
def get_output(self):
return (self.open_prices)
#gets the normalized output
def get_normalized_output(self):
return (self.normalized_open)
#inverse function to get predicted values into actual values
def inverse(self,normalized):
return ((normalized * (self.max_open - self.min_open)) + self.min_open)
#retuns what the user input
def get_training_input(self):
self.set_training_input()
return self.training_inputs
#sets puts all of the data into a list as a tuple
def set_training_input(self):
for i in range(len(self.normalized_close)):
temp_list = [self.normalized_close[i],self.normalized_high[i],self.normalized_prev[i],self.normalized_sent[i]]
self.inputs.append(temp_list)
train_end = int(.7*len(self.inputs))
self.training_inputs = self.inputs[0:train_end]
def get_testing_input(self):
self.set_testing_input()
return self.testing_inputs
def get_training_output(self):
self.set_training_output()
return self.training_outputs
def set_testing_input(self):
train_end = int(.7*len(self.inputs))
self.testing_inputs = self.inputs[train_end:]
def set_training_output(self):
train_end = int(.7*len(self.normalized_open))
self.training_outputs = self.normalized_open[0:train_end]
def get_testing_output(self):
self.set_testing_output()
return self.testing_outputs
def set_testing_output(self):
train_end = int(.7*len(self.normalized_open))
self.testing_outputs = self.normalized_open[train_end:]
def clear_lists(self):
#everything is reinitialized
self.close_prices.clear()
self.high_prices.clear()
self.prev_prices.clear()
self.normalized_close.clear()
self.normalized_high.clear()
self.normalized_prev.clear()
self.open_prices.clear()
self.normalized_open.clear()
self.inputs.clear()
self.training_inputs.clear()
self.testing_inputs.clear()
self.training_outputs.clear()
self.testing_outputs.clear()
self.sentiments.clear()
self.normalized_sent = []
self.max_sent = 0.0
self.min_sent = 0.0
self.min_close = 1000
self.max_close = 0
self.min_high = 1000
self.max_high = 0
self.min_prev = 1000
self.max_prev = 0
self.min_open= 1000
self.max_open = 0 | [
11,
12,
19,
21,
23
] |
1,012 | 9a7994a1e51c9cf7fe7d8b50ab26fa3d789fc8e5 | <mask token>
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
<mask token>
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static('/does/not/exist')
def test_static_construct_requires_directory(tmpdir):
name = 'foo'
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
<mask token>
| <mask token>
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static('/does/not/exist')
def test_static_construct_requires_directory(tmpdir):
name = 'foo'
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
<mask token>
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
| <mask token>
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static('/does/not/exist')
def test_static_construct_requires_directory(tmpdir):
name = 'foo'
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
| import pytest
import growler
from pathlib import Path
from unittest import mock
from sys import version_info
from growler.middleware.static import Static
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static('/does/not/exist')
def test_static_construct_requires_directory(tmpdir):
name = 'foo'
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
| #
# tests/middleware/test_static.py
#
import pytest
import growler
from pathlib import Path
from unittest import mock
from sys import version_info
from growler.middleware.static import Static
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static("/does/not/exist")
def test_static_construct_requires_directory(tmpdir):
name = "foo"
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
| [
4,
7,
8,
9,
10
] |
1,013 | a319ebb05e9034f19aef39bd46830c8a607ed121 | animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']
The animal at 1.
The third (3rd) animal.
The first (1st) animal.
The animal at 3.
The fifth (5th) animal.
The animal at 2.
The sixth (6th) animal.
The animal at 4.
| null | null | null | null | [
0
] |
1,014 | cc6cef70381bb08247720ec32b7e8fe79ed7123d | #!/usr/bin/python
import sys
OPEN_BRACES = ['{', '(', '[']
CLOSE_BRACES = ['}', ')', ']']
def match_paranthesis (s, pos):
stack = []
for i,c in enumerate(s):
if not c in OPEN_BRACES and not c in CLOSE_BRACES:
continue
if c in OPEN_BRACES:
stack.append((i, c))
else:
idx = CLOSE_BRACES.index(c)
oi, oc = stack[len(stack)-1]
if oc == OPEN_BRACES[idx]:
if oi == pos:
print "FOUND MATCHING CLOSE %u:%u %c:%c" % (oi, i, s[oi], s[i])
return
stack.pop()
if len(stack):
print "BRACES NOT MATCHING"
if __name__ == "__main__":
match_paranthesis("Sometimes (when I nest them (my parentheticals ) too much (like this (and this))) they get confusing.", 10)
| null | null | null | null | [
0
] |
1,015 | ddaba7a8b53072da36224dd4618696ebf0e9a4e4 | <mask token>
def compile_code():
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'
pymake.download_and_unzip(url, pth=dstpth)
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fflags = 'ffree-line-length-512'
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite
=False, dryrun=False, double=False, debug=False, fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
<mask token>
def replace_files():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1), os.
path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2), os
.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
<mask token>
| <mask token>
def compile_code():
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'
pymake.download_and_unzip(url, pth=dstpth)
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fflags = 'ffree-line-length-512'
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite
=False, dryrun=False, double=False, debug=False, fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.
endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1), os.
path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2), os
.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
<mask token>
| <mask token>
if not os.path.exists(dstpth):
os.makedirs(dstpth)
<mask token>
def compile_code():
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'
pymake.download_and_unzip(url, pth=dstpth)
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fflags = 'ffree-line-length-512'
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite
=False, dryrun=False, double=False, debug=False, fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.
endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1), os.
path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2), os
.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
if __name__ == '__main__':
compile_code()
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
run_modpath7(fn)
clean_up()
| from __future__ import print_function
import os
import shutil
import pymake
import flopy
dstpth = os.path.join('temp')
if not os.path.exists(dstpth):
os.makedirs(dstpth)
mp6pth = os.path.join(dstpth, 'Modpath_7_1_000')
expth = os.path.join(mp6pth, 'examples')
exe_name = 'mp7'
srcpth = os.path.join(mp6pth, 'source')
target = os.path.join(dstpth, exe_name)
def compile_code():
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'
pymake.download_and_unzip(url, pth=dstpth)
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fflags = 'ffree-line-length-512'
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite
=False, dryrun=False, double=False, debug=False, fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.
endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.
join(expth, name))]
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1), os.
path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2), os
.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
if __name__ == '__main__':
compile_code()
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
run_modpath7(fn)
clean_up()
| from __future__ import print_function
import os
import shutil
import pymake
import flopy
# set up paths
dstpth = os.path.join('temp')
if not os.path.exists(dstpth):
os.makedirs(dstpth)
mp6pth = os.path.join(dstpth, 'Modpath_7_1_000')
expth = os.path.join(mp6pth, 'examples')
exe_name = 'mp7'
srcpth = os.path.join(mp6pth, 'source')
target = os.path.join(dstpth, exe_name)
def compile_code():
# Remove the existing modpath6 directory if it exists
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
# Download the MODFLOW-2005 distribution
url = "https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip"
pymake.download_and_unzip(url, pth=dstpth)
# modify source files that prevent compiling with gfortran
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
# allow line lengths greater than 132 columns
fflags = 'ffree-line-length-512'
# make modpath 7
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True,
expedite=False, dryrun=False, double=False, debug=False,
fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if
f.endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
# rename a few files for linux
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1),
os.path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2),
os.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
# run the model
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
# clean up
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
# compile MODPATH 7
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
if __name__ == "__main__":
compile_code()
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
run_modpath7(fn)
clean_up()
| [
7,
8,
9,
11,
12
] |
1,016 | da3be0d3b815e11d292a7c7e8f5ce32b35580f98 | <mask token>
| <mask token>
alphabet = ' ' + string.ascii_lowercase
| import string
alphabet = ' ' + string.ascii_lowercase
| # Let's look at the lowercase letters.
import string
alphabet = " " + string.ascii_lowercase
| null | [
0,
1,
2,
3
] |
1,017 | 299432b095f16c3cb4949319705800d06f534cf9 | from __future__ import with_statement # this is to work with python2.5
from pyps import workspace, module
def invoke_function(fu, ws):
return fu._get_code(activate = module.print_code_out_regions)
if __name__=="__main__":
workspace.delete('paws_out_regions')
with workspace('paws_out_regions.c',name='paws_out_regions',deleteOnClose=True) as ws:
for fu in ws.fun:
print invoke_function(fu, ws)
| null | null | null | null | [
0
] |
1,018 | c1bb7b579e6b251ddce41384aef1243e411c5d0e | <mask token>
def distance(row):
source = row['start_lat'], row['start_lng']
dest = row['end_lat'], row['end_lng']
return vincenty(source, dest).miles
<mask token>
def dropoff_to_MH(row):
"""find the distance between dropoff point and Manhattan center"""
dest = row['end_lat'], row['end_lng']
return vincenty(dest, Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime('%A')
<mask token>
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep) - ref).seconds
return min(sec, 86400 - sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies(DD, prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD], axis=1)
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)
df_train_s['distance2'] = df_train_s['distance'] ** 2
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)
return df_train_s
<mask token>
def mean_absolute_percentage_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true / y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err) * 100
def evalute(dist, y_true, prediction):
MAE, MAPE = {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist, y_true, y_pred)
MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)
return MAE, MAPE
<mask token>
| <mask token>
def distance(row):
source = row['start_lat'], row['start_lng']
dest = row['end_lat'], row['end_lng']
return vincenty(source, dest).miles
<mask token>
def dropoff_to_MH(row):
"""find the distance between dropoff point and Manhattan center"""
dest = row['end_lat'], row['end_lng']
return vincenty(dest, Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime('%A')
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep) - ref).seconds
return min(sec, 86400 - sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies(DD, prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD], axis=1)
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)
df_train_s['distance2'] = df_train_s['distance'] ** 2
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)
return df_train_s
<mask token>
def mean_absolute_percentage_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true / y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err) * 100
def evalute(dist, y_true, prediction):
MAE, MAPE = {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist, y_true, y_pred)
MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)
return MAE, MAPE
<mask token>
| <mask token>
def distance(row):
source = row['start_lat'], row['start_lng']
dest = row['end_lat'], row['end_lng']
return vincenty(source, dest).miles
<mask token>
def pickup_to_MH(row):
"""find the distance between pick up point and Manhattan center"""
source = row['start_lat'], row['start_lng']
return vincenty(source, Manhattan).miles
def dropoff_to_MH(row):
"""find the distance between dropoff point and Manhattan center"""
dest = row['end_lat'], row['end_lng']
return vincenty(dest, Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime('%A')
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep) - ref).seconds
return min(sec, 86400 - sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies(DD, prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD], axis=1)
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)
df_train_s['distance2'] = df_train_s['distance'] ** 2
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)
return df_train_s
<mask token>
def mean_absolute_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist / y_true - dist / y_pred)
err = err[np.isfinite(err)]
return np.mean(err) * 3600
def mean_absolute_percentage_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true / y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err) * 100
def evalute(dist, y_true, prediction):
MAE, MAPE = {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist, y_true, y_pred)
MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)
return MAE, MAPE
<mask token>
| <mask token>
get_ipython().magic('matplotlib inline')
<mask token>
df_train.head()
def distance(row):
source = row['start_lat'], row['start_lng']
dest = row['end_lat'], row['end_lng']
return vincenty(source, dest).miles
<mask token>
def pickup_to_MH(row):
"""find the distance between pick up point and Manhattan center"""
source = row['start_lat'], row['start_lng']
return vincenty(source, Manhattan).miles
def dropoff_to_MH(row):
"""find the distance between dropoff point and Manhattan center"""
dest = row['end_lat'], row['end_lng']
return vincenty(dest, Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime('%A')
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep) - ref).seconds
return min(sec, 86400 - sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies(DD, prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD], axis=1)
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)
df_train_s['distance2'] = df_train_s['distance'] ** 2
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)
return df_train_s
np.random.seed(42)
<mask token>
df_train_s.head()
<mask token>
ax.set(xlabel='latitude', ylabel='longitude')
<mask token>
ax.set(xlabel='latitude', ylabel='longitude')
plt.show()
df_train_s[['distance', 'duration', 'velocity']].describe()
df_train_s['velocity'].hist(bins=1000, normed=True)
<mask token>
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, square=True, linewidths=
0.5, cbar_kws={'shrink': 0.5}, ax=ax)
plt.show()
df_train_s.plot.scatter('distance', 'velocity')
<mask token>
gr.mean().plot.bar(yerr=gr.std())
<mask token>
list(enumerate(cl))
dist_train.mean()
<mask token>
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title('Linear model coefficients')
plt.show()
<mask token>
print(linear_results.summary())
<mask token>
print(gamma_results.summary())
<mask token>
DNN_model.add(Dense(100, input_dim=X_train.shape[1], init='uniform',
activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50, init='uniform', activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100, init='uniform', activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1, init='uniform', activation='relu'))
DNN_model.summary()
<mask token>
DNN_model.compile(loss='mean_absolute_error', optimizer='adam')
<mask token>
plt.figure(figsize=(10, 8))
plt.title('Dense model training', fontsize=12)
plt.plot(history.history['loss'], label='Train')
plt.plot(history.history['val_loss'], label='Test')
plt.grid('on')
plt.xlabel('Epoch', fontsize=12)
plt.ylabel('loss', fontsize=12)
plt.legend(loc='upper right')
<mask token>
def mean_absolute_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist / y_true - dist / y_pred)
err = err[np.isfinite(err)]
return np.mean(err) * 3600
def mean_absolute_percentage_error(dist, y_true, y_pred):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true / y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err) * 100
def evalute(dist, y_true, prediction):
MAE, MAPE = {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist, y_true, y_pred)
MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)
return MAE, MAPE
<mask token>
pd.DataFrame([MAE_test, MAE_train, MAPE_test, MAPE_train], index=[
'MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train']).transpose()
dist_train.mean()
<mask token>
df_ans.to_csv('answer.csv')
<mask token>
|
# coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
return datetime.fromtimestamp(ep).month
def time_of_day(ep):
ref = datetime(2015, 1, 1, 0, 0, 0)
sec = (datetime.fromtimestamp(ep)- ref).seconds
return min(sec, 86400- sec)
def year(ep):
return datetime.fromtimestamp(ep).year
def add_features(df_train_s):
# Add day of the week and the dummy variable
DD = df_train_s['start_timestamp'].map(day_of_week)
df_train_s['day'] = DD
DD = pd.get_dummies( DD,prefix='day', drop_first=True)
df_train_s = pd.concat([df_train_s, DD],axis =1 )
# Month, time of the dat, df_train_s
df_train_s['month'] = df_train_s['start_timestamp'].map(month)
df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
# distance between start and end of the trip
df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )
df_train_s['distance2'] = df_train_s['distance']**2
# distance between start, end, and center of Manhatan
df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )
df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )
return df_train_s
# Now, we can easily add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.
# In[24]:
np.random.seed(42)
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# adding the feature to test set.
df_test = add_features(df_test)
# ## Removing Outlires
# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data Visulazation
#
# First we look at the starting and ending point of the trips which happens in New York.
#
#
#
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
ax = axes[0],
title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
ax = axes[1],
title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. A log-normal or Gamma distribution are approprate candiatdes for this distribution.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
df_train_s['velocity'].hist(bins=1000,normed=True)
# ### Corrolation matrix
# In[44]:
corr = df_train_s.corr()
# generate a mask for the lower triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
### Seanility and time Effect on Velocity
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Let's split our data to train and test set in fraction of $\frac{4}{1}$ to facilate comparing the results.
# This test set is differenet from the given test set.
# In[105]:
cl = list(set(df_train_s.keys())-{'velocity','duration','day'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear Model
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized Linear Model
# I tried GLM with gamma fammaly.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model.
#
# I normilized the data the input data to imporve the performance.
# In[195]:
DNN_model = Sequential()
DNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50,init='uniform',activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100,init='uniform',activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(1,init='uniform',activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
validation_data=(X_test/mn, y_test),
epochs =100,
batch_size=100,
verbose=2)
# In[197]:
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evalution
#
# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute
# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity.
#
# In[207]:
preds_test, preds_train = {}, {}
#Linear Model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
#GLM (Gamma Model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
#Deep Learning
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The functions are used for evalution
# In[84]:
def mean_absolute_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(dist/y_true - dist/y_pred)
err = err[np.isfinite(err)]
return np.mean(err) *3600
def mean_absolute_percentage_error(dist,y_true, y_pred ):
"""
Args:
dist(ndarray) : distance between pick up and drop off
y_true(ndarray) : true velocity
y_pred(ndarray) : the prediction value of velocity
"""
err = np.abs(y_true/y_pred - 1)
err = err[np.isfinite(err)]
return np.mean(err)*100
def evalute(dist,y_true,prediction):
MAE, MAPE= {}, {}
for kys, y_pred in prediction.items():
MAE[kys] = mean_absolute_error(dist,y_true, y_pred )
MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred )
return MAE, MAPE
# In[209]:
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
dist_train.mean()
# ## Generate Prediction for Test Set
#
# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set.
# In[212]:
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)
dist_x = XX[:,1]
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
df_ans= pd.DataFrame(TD, columns =['duration'])
# ## Extention and Further Idea
# Here, we only use the vincenty, but by conteccting to google API and fidning the real distance between start and end point the preditor defenitlly can be improved. Also, here I only used 10% of data points becouse of the limitation on runnig the DNN. By using GPU or running over the cloud we can use all the samples.
#
#
#
#
| [
8,
9,
11,
12,
15
] |
1,019 | ae84b449c8919f14954633b14993e6291501bc24 | <mask token>
| <mask token>
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
<mask token>
| <mask token>
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
if login('michelle', 'michelle'):
print('Login Successfull[+]')
| import requests
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
if login('michelle', 'michelle'):
print('Login Successfull[+]')
| import requests
def login(username, password):
data = {'login':username,'pwd':password,'lang':''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php', data=data, allow_redirects=False)
if r.headers['Location'] == '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect':
return False
return True
# import pdb;pdb.set_trace()
if login("michelle", "michelle"):
print("Login Successfull[+]")
| [
0,
1,
2,
3,
4
] |
1,020 | 9aa54f1259aceb052cfba74cedcfadfe68778ebd | <mask token>
| <mask token>
embed()
| <mask token>
b = webdriver.Firefox()
embed()
| from IPython import embed
from selenium import webdriver
b = webdriver.Firefox()
embed()
| null | [
0,
1,
2,
3
] |
1,021 | 0bfb089556bfa253bf139f03cd3079ced962d858 | <mask token>
| <mask token>
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
data = Phys.from_sbdb([(n + 1) for n in range(5)])
assert len(data.table) == 5
| import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
data = Phys.from_sbdb([(n + 1) for n in range(5)])
assert len(data.table) == 5
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
# query one object
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
# query several objects
data = Phys.from_sbdb([n+1 for n in range(5)])
assert len(data.table) == 5
| null | [
0,
1,
2,
3
] |
1,022 | 368151a134f987ed78c8048521137672530b5cce | # KeyLogger.py
# show a character key when pressed without using Enter key
# hide the Tkinter GUI window, only console shows
import Tkinter as tk
def key(event):
if event.keysym == 'Escape':
root.destroy()
print event.char, event.keysym
root = tk.Tk()
print "Press a key (Escape key to exit):"
root.bind_all('<Key>', key)
# don't show the tk window
root.withdraw()
root.mainloop()
| null | null | null | null | [
0
] |
1,023 | 70aba6c94b7050113adf7ae48bd4e13aa9a34587 | <mask token>
| <mask token>
@typ.typ(items=[int])
def gnome_sort(items):
"""
>>> gnome_sort([])
[]
>>> gnome_sort([1])
[1]
>>> gnome_sort([2,1])
[1, 2]
>>> gnome_sort([1,2])
[1, 2]
>>> gnome_sort([1,2,2])
[1, 2, 2]
"""
i = 0
n = len(items)
while i < n:
if i and items[i] < items[i - 1]:
items[i], items[i - 1] = items[i - 1], items[i]
i -= 1
else:
i += 1
return items
| import typ
@typ.typ(items=[int])
def gnome_sort(items):
"""
>>> gnome_sort([])
[]
>>> gnome_sort([1])
[1]
>>> gnome_sort([2,1])
[1, 2]
>>> gnome_sort([1,2])
[1, 2]
>>> gnome_sort([1,2,2])
[1, 2, 2]
"""
i = 0
n = len(items)
while i < n:
if i and items[i] < items[i - 1]:
items[i], items[i - 1] = items[i - 1], items[i]
i -= 1
else:
i += 1
return items
| null | null | [
0,
1,
2
] |
1,024 | 1ead23c6ea4e66b24e60598ae20606e24fa41482 | <mask token>
| <mask token>
year = datetime.datetime.now().year
project = 'python201'
copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'
author = 'Geoffrey Lentner, Ashwin Srinath'
version = '0.0.1'
release = '0.0.1'
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',
'sphinx.ext.githubpages', 'sphinx.ext.autodoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.png'
html_favicon = '_static/favicon.ico'
html_static_path = ['']
html_theme_options = {'external_links': [], 'github_url':
'https://github.com/glentner/python201'}
latex_elements = {}
latex_documents = [(master_doc, 'python-201.tex',
'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]
man_pages = [('manpage', 'cumprod',
'Compute cumulative product of a sequence of numbers.',
'Geoffrey Lentner <[email protected]>.', '1')]
texinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',
author, 'python-201', 'One line description of project.', 'Miscellaneous')]
intersphinx_mapping = {'https://docs.python.org/3/': None}
rst_epilog = f"""
.. |release| replace:: {release}
.. |copyright| replace:: {copyright}
"""
| import datetime
year = datetime.datetime.now().year
project = 'python201'
copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'
author = 'Geoffrey Lentner, Ashwin Srinath'
version = '0.0.1'
release = '0.0.1'
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',
'sphinx.ext.githubpages', 'sphinx.ext.autodoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.png'
html_favicon = '_static/favicon.ico'
html_static_path = ['']
html_theme_options = {'external_links': [], 'github_url':
'https://github.com/glentner/python201'}
latex_elements = {}
latex_documents = [(master_doc, 'python-201.tex',
'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]
man_pages = [('manpage', 'cumprod',
'Compute cumulative product of a sequence of numbers.',
'Geoffrey Lentner <[email protected]>.', '1')]
texinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',
author, 'python-201', 'One line description of project.', 'Miscellaneous')]
intersphinx_mapping = {'https://docs.python.org/3/': None}
rst_epilog = f"""
.. |release| replace:: {release}
.. |copyright| replace:: {copyright}
"""
| # SPDX-FileCopyrightText: 2019-2021 Python201 Contributors
# SPDX-License-Identifier: MIT
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
import datetime
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
year = datetime.datetime.now().year
project = 'python201'
copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'
author = 'Geoffrey Lentner, Ashwin Srinath'
version = '0.0.1'
release = '0.0.1'
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.png'
html_favicon = '_static/favicon.ico'
html_static_path = ['']
html_theme_options = {
'external_links': [],
'github_url': 'https://github.com/glentner/python201',
}
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {}
latex_documents = [
(master_doc, 'python-201.tex', 'python-201 Documentation',
'Geoffrey Lentner, Ashwin Srinath', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# manual pages options
man_pages = [(
'manpage',
'cumprod',
'Compute cumulative product of a sequence of numbers.',
'Geoffrey Lentner <[email protected]>.',
'1'
),
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-201', 'python-201 Documentation',
author, 'python-201', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
intersphinx_mapping = {'https://docs.python.org/3/': None}
# export variables with epilogue
rst_epilog = f"""
.. |release| replace:: {release}
.. |copyright| replace:: {copyright}
"""
| null | [
0,
1,
2,
3
] |
1,025 | 8fee548466abf6d35ea180f8de4e52a9b8902d3f | import os
import math
from collections import defaultdict
__author__ = 'steven'
question='qb'
fs={'t1','small.in','large'}
def getmincost(n,c,f,x):
t=0.0
for i in range(0,n):
t+=1/(2+f*i)
t=t*c
t+=x/(2+f*n)
ct=getmincostnshift(n,c,f,x)
return min(t,ct);
def getmincostnshift(n,c,f,x):
t=0.0
n-=1;
for i in range(0,n):
t+=1/(2+f*i)
t=t*c
t+=x/(2+f*n)
return t
def getminn(c,f,x):
return int(math.ceil((x*f-2*c)/(c*f)))
def solver(c,f,x):
if (x*f-2*c)<0:
return x/2
minn=getminn(c,f,x)
return getmincost(minn,c,f,x)
for s in fs:
print question+s
f='./'+question+s
if os.path.isfile('./'+question+s):
ls=open(f)
noq=(int)(ls.readline())
fout=open(question+s+'-a','w')
print noq
for i in range(0,noq):
fa=ls.readline()
fa=fa.split();
c, f, x=[float(s) for s in fa]
fout.write('Case #%d: %f\n'%(i+1,solver(c,f,x)))
#Case #1: 7
#Case #2: Bad magician!
#Case #3: Volunteer cheated!
| null | null | null | null | [
0
] |
1,026 | f2e6d23e6d8c5aa6e80a652dc6cb8bda45824d0c | <mask token>
| <mask token>
def start_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None):
"""Creates a Task and starts execution."""
task = make_task(display_window, daq, exp_type, parameters, file_save,
signal_model, language_model, fake, auc_filename)
task.execute()
| <mask token>
def make_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None
) ->Task:
"""Creates a Task based on the provided parameters.
Parameters:
-----------
display_window: pyschopy Window
daq: DataAcquisitionClient
exp_type: ExperimentType
parameters: dict
file_save: str - path to file in which to save data
signal_model
language_model - language model
fake: boolean - true if eeg stream is randomly generated
auc_filename: str
Returns:
--------
Task instance
"""
if exp_type is ExperimentType.RSVP_CALIBRATION:
return RSVPCalibrationTask(display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_COPY_PHRASE:
return RSVPCopyPhraseTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake=fake)
if exp_type is ExperimentType.RSVP_ICON_TO_ICON:
return RSVPIconToIconTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake, False, auc_filename)
if exp_type is ExperimentType.RSVP_ICON_TO_WORD:
return RSVPIconToIconTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake, True, auc_filename)
if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:
return RSVPAlertToneCalibrationTask(display_window, daq, parameters,
file_save)
if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:
return RSVPInterSequenceFeedbackCalibration(display_window, daq,
parameters, file_save)
if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:
return RSVPTimingVerificationCalibration(display_window, daq,
parameters, file_save)
raise TaskRegistryException(
'The provided experiment type is not registered.')
def start_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None):
"""Creates a Task and starts execution."""
task = make_task(display_window, daq, exp_type, parameters, file_save,
signal_model, language_model, fake, auc_filename)
task.execute()
| <mask token>
from bcipy.tasks.rsvp.calibration.alert_tone_calibration import RSVPAlertToneCalibrationTask
from bcipy.tasks.rsvp.calibration.inter_sequence_feedback_calibration import RSVPInterSequenceFeedbackCalibration
from bcipy.tasks.rsvp.calibration.calibration import RSVPCalibrationTask
from bcipy.tasks.rsvp.copy_phrase import RSVPCopyPhraseTask
from bcipy.tasks.rsvp.icon_to_icon import RSVPIconToIconTask
from bcipy.tasks.rsvp.calibration.timing_verification import RSVPTimingVerificationCalibration
from bcipy.tasks.task import Task
from bcipy.tasks.exceptions import TaskRegistryException
from bcipy.tasks.task_registry import ExperimentType
def make_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None
) ->Task:
"""Creates a Task based on the provided parameters.
Parameters:
-----------
display_window: pyschopy Window
daq: DataAcquisitionClient
exp_type: ExperimentType
parameters: dict
file_save: str - path to file in which to save data
signal_model
language_model - language model
fake: boolean - true if eeg stream is randomly generated
auc_filename: str
Returns:
--------
Task instance
"""
if exp_type is ExperimentType.RSVP_CALIBRATION:
return RSVPCalibrationTask(display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_COPY_PHRASE:
return RSVPCopyPhraseTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake=fake)
if exp_type is ExperimentType.RSVP_ICON_TO_ICON:
return RSVPIconToIconTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake, False, auc_filename)
if exp_type is ExperimentType.RSVP_ICON_TO_WORD:
return RSVPIconToIconTask(display_window, daq, parameters,
file_save, signal_model, language_model, fake, True, auc_filename)
if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:
return RSVPAlertToneCalibrationTask(display_window, daq, parameters,
file_save)
if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:
return RSVPInterSequenceFeedbackCalibration(display_window, daq,
parameters, file_save)
if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:
return RSVPTimingVerificationCalibration(display_window, daq,
parameters, file_save)
raise TaskRegistryException(
'The provided experiment type is not registered.')
def start_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None):
"""Creates a Task and starts execution."""
task = make_task(display_window, daq, exp_type, parameters, file_save,
signal_model, language_model, fake, auc_filename)
task.execute()
| """Code for constructing and executing Tasks"""
from bcipy.tasks.rsvp.calibration.alert_tone_calibration import RSVPAlertToneCalibrationTask
from bcipy.tasks.rsvp.calibration.inter_sequence_feedback_calibration import (
RSVPInterSequenceFeedbackCalibration
)
from bcipy.tasks.rsvp.calibration.calibration import RSVPCalibrationTask
from bcipy.tasks.rsvp.copy_phrase import RSVPCopyPhraseTask
from bcipy.tasks.rsvp.icon_to_icon import RSVPIconToIconTask
from bcipy.tasks.rsvp.calibration.timing_verification import RSVPTimingVerificationCalibration
from bcipy.tasks.task import Task
from bcipy.tasks.exceptions import TaskRegistryException
from bcipy.tasks.task_registry import ExperimentType
def make_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True,
auc_filename=None) -> Task:
"""Creates a Task based on the provided parameters.
Parameters:
-----------
display_window: pyschopy Window
daq: DataAcquisitionClient
exp_type: ExperimentType
parameters: dict
file_save: str - path to file in which to save data
signal_model
language_model - language model
fake: boolean - true if eeg stream is randomly generated
auc_filename: str
Returns:
--------
Task instance
"""
# NORMAL RSVP MODES
if exp_type is ExperimentType.RSVP_CALIBRATION:
return RSVPCalibrationTask(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_COPY_PHRASE:
return RSVPCopyPhraseTask(
display_window, daq, parameters, file_save, signal_model,
language_model, fake=fake)
# ICON TASKS
if exp_type is ExperimentType.RSVP_ICON_TO_ICON:
return RSVPIconToIconTask(display_window, daq,
parameters, file_save, signal_model,
language_model, fake, False, auc_filename)
if exp_type is ExperimentType.RSVP_ICON_TO_WORD:
# pylint: disable=fixme
# TODO: consider a new class for this scenario.
return RSVPIconToIconTask(display_window, daq,
parameters, file_save, signal_model,
language_model, fake, True, auc_filename)
# CALIBRATION FEEDBACK TASKS
if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:
return RSVPAlertToneCalibrationTask(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:
return RSVPInterSequenceFeedbackCalibration(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:
return RSVPTimingVerificationCalibration(display_window, daq,
parameters, file_save)
raise TaskRegistryException(
'The provided experiment type is not registered.')
def start_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None):
"""Creates a Task and starts execution."""
task = make_task(display_window, daq, exp_type, parameters, file_save,
signal_model, language_model, fake, auc_filename)
task.execute()
| [
0,
1,
2,
3,
4
] |
1,027 | e59e60b0a4b7deca9c510bd6b9c58636c6d34c80 | <mask token>
| <mask token>
try:
print(l)
s = len(l)
if s > 5:
raise TypeError
print(d[2])
except TypeError:
print('Error!!!length should be less than or equals to 5')
except NameError:
print('index out of range')
else:
for i in l:
print(i)
finally:
print('execution done!!!!!!')
| l = {1, 2, 3, 4}
try:
print(l)
s = len(l)
if s > 5:
raise TypeError
print(d[2])
except TypeError:
print('Error!!!length should be less than or equals to 5')
except NameError:
print('index out of range')
else:
for i in l:
print(i)
finally:
print('execution done!!!!!!')
|
l={1,2,3,4}
try:
print(l)
s=len(l)
if s>5:
raise TypeError
print(d[2])
except TypeError:
print("Error!!!length should be less than or equals to 5")
except NameError:
print("index out of range")
else:
for i in l:
print(i)
finally:
print("execution done!!!!!!") | null | [
0,
1,
2,
3
] |
1,028 | c0503536672aa824eaf0d19b9d4b5431ef910432 | <mask token>
| <mask token>
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,
idField=args.idField, prefix=args.prefix, zCutoutSize=args.
zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=
args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,
raField=args.raField, decField=args.decField)
else:
raise Exception('### Can not find the input catalog: %s' % args.incat)
<mask token>
| <mask token>
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,
idField=args.idField, prefix=args.prefix, zCutoutSize=args.
zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=
args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,
raField=args.raField, decField=args.decField)
else:
raise Exception('### Can not find the input catalog: %s' % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('root', help='Root directory of data repository')
parser.add_argument('incat', help='The input catalog for cutout')
parser.add_argument('-s', '--size', dest='size', type=int, help=
'Half size of the cutout box', default=200)
parser.add_argument('-f', '--filter', dest='filter', help='Filter',
default='HSC-I')
parser.add_argument('-cf', '--color-filters', dest='colorFilters', help
='Choice of filters for color images', default='riz')
parser.add_argument('-sf', '--size-field', dest='sizeField', help=
'Column name for cutout size', default='cutout_size')
parser.add_argument('-info1', '--infoField1', dest='infoField1', help=
'Column name for first extra information', default=None)
parser.add_argument('-info2', '--infoField2', dest='infoField2', help=
'Column name for second extra information', default=None)
parser.add_argument('-oc', '--onlyColor', action='store_true', dest=
'onlyColor', default=False)
parser.add_argument('-safe', '--safe', action='store_true', dest='safe',
default=False)
parser.add_argument('-clean', '--clean', action='store_true', dest=
'clean', default=False)
parser.add_argument('-v', '--verbose', action='store_true', dest=
'verbose', default=False)
parser.add_argument('-src', '--src', action='store_true', dest=
'saveSrc', default=True)
parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=
'makeDir', default=True)
parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=
'zCutout', default=True)
parser.add_argument('-nc', '--noColor', action='store_true', dest=
'noColor', default=True)
parser.add_argument('-p', '--prefix', dest='prefix', help=
'Prefix of the output file', default='redBCG')
parser.add_argument('-id', '--id', dest='idField', help=
'Column name for ID', default='ID_CLUSTER')
parser.add_argument('-ra', '--ra', dest='raField', help=
'Column name for RA', default='RA_BCG')
parser.add_argument('-dec', '--dec', dest='decField', help=
'Column name for DEC', default='DEC_BCG')
parser.add_argument('-z', '--redshift', dest='zField', help=
'Column name for z', default='Z_LAMBDA')
args = parser.parse_args()
run(args)
| import os
import argparse
import coaddBatchCutout as cbc
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,
idField=args.idField, prefix=args.prefix, zCutoutSize=args.
zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=
args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,
raField=args.raField, decField=args.decField)
else:
raise Exception('### Can not find the input catalog: %s' % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('root', help='Root directory of data repository')
parser.add_argument('incat', help='The input catalog for cutout')
parser.add_argument('-s', '--size', dest='size', type=int, help=
'Half size of the cutout box', default=200)
parser.add_argument('-f', '--filter', dest='filter', help='Filter',
default='HSC-I')
parser.add_argument('-cf', '--color-filters', dest='colorFilters', help
='Choice of filters for color images', default='riz')
parser.add_argument('-sf', '--size-field', dest='sizeField', help=
'Column name for cutout size', default='cutout_size')
parser.add_argument('-info1', '--infoField1', dest='infoField1', help=
'Column name for first extra information', default=None)
parser.add_argument('-info2', '--infoField2', dest='infoField2', help=
'Column name for second extra information', default=None)
parser.add_argument('-oc', '--onlyColor', action='store_true', dest=
'onlyColor', default=False)
parser.add_argument('-safe', '--safe', action='store_true', dest='safe',
default=False)
parser.add_argument('-clean', '--clean', action='store_true', dest=
'clean', default=False)
parser.add_argument('-v', '--verbose', action='store_true', dest=
'verbose', default=False)
parser.add_argument('-src', '--src', action='store_true', dest=
'saveSrc', default=True)
parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=
'makeDir', default=True)
parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=
'zCutout', default=True)
parser.add_argument('-nc', '--noColor', action='store_true', dest=
'noColor', default=True)
parser.add_argument('-p', '--prefix', dest='prefix', help=
'Prefix of the output file', default='redBCG')
parser.add_argument('-id', '--id', dest='idField', help=
'Column name for ID', default='ID_CLUSTER')
parser.add_argument('-ra', '--ra', dest='raField', help=
'Column name for RA', default='RA_BCG')
parser.add_argument('-dec', '--dec', dest='decField', help=
'Column name for DEC', default='DEC_BCG')
parser.add_argument('-z', '--redshift', dest='zField', help=
'Column name for z', default='Z_LAMBDA')
args = parser.parse_args()
run(args)
| #!/usr/bin/env python
# encoding: utf-8
import os
import argparse
import coaddBatchCutout as cbc
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat,
filter=args.filter,
idField=args.idField,
prefix=args.prefix,
zCutoutSize=args.zCutout,
zField=args.zField,
onlyColor=args.onlyColor,
noColor=args.noColor,
saveSrc=args.saveSrc,
makeDir=args.makeDir,
raField=args.raField,
decField=args.decField)
else:
raise Exception("### Can not find the input catalog: %s" % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("root", help="Root directory of data repository")
parser.add_argument("incat", help="The input catalog for cutout")
parser.add_argument("-s", '--size', dest='size', type=int,
help="Half size of the cutout box", default=200)
parser.add_argument('-f', '--filter', dest='filter', help="Filter",
default='HSC-I')
parser.add_argument('-cf', '--color-filters', dest='colorFilters',
help="Choice of filters for color images", default='riz')
parser.add_argument('-sf', '--size-field', dest='sizeField',
help="Column name for cutout size", default='cutout_size')
parser.add_argument('-info1', '--infoField1', dest='infoField1',
help="Column name for first extra information",
default=None)
parser.add_argument('-info2', '--infoField2', dest='infoField2',
help="Column name for second extra information",
default=None)
parser.add_argument('-oc', '--onlyColor', action="store_true", dest='onlyColor',
default=False)
parser.add_argument('-safe', '--safe', action="store_true", dest='safe',
default=False)
parser.add_argument('-clean', '--clean', action="store_true", dest='clean',
default=False)
parser.add_argument('-v', '--verbose', action="store_true", dest='verbose',
default=False)
parser.add_argument('-src', '--src', action="store_true", dest='saveSrc',
default=True)
parser.add_argument('-makeDir', '--makeDir', action="store_true", dest='makeDir',
default=True)
parser.add_argument('-zc', '--zCutoutSize', action="store_true", dest='zCutout',
default=True)
parser.add_argument('-nc', '--noColor', action="store_true", dest='noColor',
default=True)
parser.add_argument('-p', '--prefix', dest='prefix',
help='Prefix of the output file',
default='redBCG')
parser.add_argument('-id', '--id', dest='idField', help="Column name for ID",
default='ID_CLUSTER')
parser.add_argument('-ra', '--ra', dest='raField', help="Column name for RA",
default='RA_BCG')
parser.add_argument('-dec', '--dec', dest='decField', help="Column name for DEC",
default='DEC_BCG')
parser.add_argument('-z', '--redshift', dest='zField', help="Column name for z",
default='Z_LAMBDA')
args = parser.parse_args()
run(args)
| [
0,
1,
2,
3,
4
] |
1,029 | 2e140d1174e0b2d8a97df880b1bffdf84dc0d236 | <mask token>
| <mask token>
if __name__ == '__main__':
logger = Log()
conf = Configuration('configuration/configuration.yaml'
).load_configuration()
ph = ProductsHandler(conf['products_path'])
logger.info('Configuration loaded')
products = ph.load_products()
logger.info('Products loaded from {}'.format(conf['products_path']))
update, msg = spider.Spider(products, conf).crawl()
if len(update) > 0:
logger.info('Products to report')
mail_helper = MailHelper()
mail_helper.send_mail('', msg, 'New prices lower')
logger.info('Mail sent')
mail_helper.close_connection()
else:
logger.info('Nothing to report')
ph.save_products(products)
logger.info('Configuration saved')
else:
print('Exec this file as the main entrypoint! -> python3 init.py')
| from helper.logger_helper import Log
from helper.mail_helper import MailHelper
import spider.spider as spider
from configuration.configuration_handler import Configuration
from configuration.products_handler import ProductsHandler
if __name__ == '__main__':
logger = Log()
conf = Configuration('configuration/configuration.yaml'
).load_configuration()
ph = ProductsHandler(conf['products_path'])
logger.info('Configuration loaded')
products = ph.load_products()
logger.info('Products loaded from {}'.format(conf['products_path']))
update, msg = spider.Spider(products, conf).crawl()
if len(update) > 0:
logger.info('Products to report')
mail_helper = MailHelper()
mail_helper.send_mail('', msg, 'New prices lower')
logger.info('Mail sent')
mail_helper.close_connection()
else:
logger.info('Nothing to report')
ph.save_products(products)
logger.info('Configuration saved')
else:
print('Exec this file as the main entrypoint! -> python3 init.py')
| from helper.logger_helper import Log
from helper.mail_helper import MailHelper
import spider.spider as spider
from configuration.configuration_handler import Configuration
from configuration.products_handler import ProductsHandler
if __name__ == "__main__":
logger = Log()
conf = Configuration('configuration/configuration.yaml').load_configuration()
ph = ProductsHandler(conf["products_path"])
logger.info("Configuration loaded")
products = ph.load_products()
logger.info("Products loaded from {}".format(conf["products_path"]))
update, msg = spider.Spider(products, conf).crawl()
if len(update) > 0:
logger.info("Products to report")
mail_helper = MailHelper()
mail_helper.send_mail('', msg, "New prices lower")
logger.info("Mail sent")
mail_helper.close_connection()
else:
logger.info("Nothing to report")
ph.save_products(products)
logger.info("Configuration saved")
else:
print("Exec this file as the main entrypoint! -> python3 init.py") | null | [
0,
1,
2,
3
] |
1,030 | dbb66930edd70729e4df7d3023e83a6eae65cccd | <mask token>
| def main():
import sys
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config
from pyvideohub.models import ScopedSession, Base
config_file = sys.argv[1]
settings = get_appsettings(config_file)
engine = engine_from_config(settings, 'sqlalchemy.')
ScopedSession.configure(bind=engine)
Base.metadata.create_all(engine)
print('DB initialized done.')
<mask token>
| def main():
import sys
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config
from pyvideohub.models import ScopedSession, Base
config_file = sys.argv[1]
settings = get_appsettings(config_file)
engine = engine_from_config(settings, 'sqlalchemy.')
ScopedSession.configure(bind=engine)
Base.metadata.create_all(engine)
print('DB initialized done.')
if __name__ == '__main__':
main()
| #!/usr/bin/env python
def main():
import sys
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config
from pyvideohub.models import ScopedSession, Base
config_file = sys.argv[1]
settings = get_appsettings(config_file)
engine = engine_from_config(settings, 'sqlalchemy.')
ScopedSession.configure(bind=engine)
Base.metadata.create_all(engine)
print('DB initialized done.')
if __name__ == '__main__':
main()
| null | [
0,
1,
2,
3
] |
1,031 | e288403cb310bb7241b25e74d1b5bcc63967128c | <mask token>
| <mask token>
aws_glue_integration_tests += deployment_patterns
| <mask token>
aws_glue_integration_tests = []
deployment_patterns = [IntegrationTestFixture(name=
'how_to_use_great_expectations_in_aws_glue', user_flow_script=
'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'
, backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.
AWS, BackendDependencies.AWS_GLUE])]
aws_glue_integration_tests += deployment_patterns
| <mask token>
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [IntegrationTestFixture(name=
'how_to_use_great_expectations_in_aws_glue', user_flow_script=
'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'
, backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.
AWS, BackendDependencies.AWS_GLUE])]
aws_glue_integration_tests += deployment_patterns
| """Note: AWS Glue split from spark since it requires different test dependencies."""
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [
# TODO: The AWS_GLUE dependency is only being marked and not run at this time.
IntegrationTestFixture(
name="how_to_use_great_expectations_in_aws_glue",
user_flow_script="tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py",
backend_dependencies=[
BackendDependencies.SPARK,
BackendDependencies.AWS,
BackendDependencies.AWS_GLUE,
],
),
]
aws_glue_integration_tests += deployment_patterns
| [
0,
1,
2,
3,
4
] |
1,032 | 8fd74287fbc653ea3ed4aa76a272486aa29185cf | # !/usr/bin/python
# sudo mn --custom _mininet_topo.py --topo mytopo,5
# sudo mn --custom _mininet_topo.py --topo mytopo,3 --test simpletest
# or just run this python file
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.cli import CLI
class SingleSwitchTopo(Topo):
"Single switch connected to n hosts."
def build(self):
# switch = self.addSwitch('s1')
# # Python's range(N) generates 0..N-1
# for h in range(n):
# host = self.addHost('h%s' % (h + 1))
# self.addLink(host, switch)
s1 = self.addSwitch('s1')
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
self.addLink(h1, s1)
self.addLink(h2, s1)
self.addLink(h3, s1)
self.addLink(h4, s1)
self.addLink(h5, s1)
self.addLink(h6, s1)
#
def simpleTest():
"Create and test a simple network"
topo = SingleSwitchTopo()
net = Mininet(topo)
net.start()
print "Dumping host connections"
dumpNodeConnections(net.hosts)
print "Testing network connectivity"
net.pingAll()
# net.stop()
h1 = net.get('h1')
h2 = net.get('h2')
h3 = net.get('h3')
h4 = net.get('h4')
h5 = net.get('h5')
h6 = net.get('h6')
for host in [h1, h2, h3, h4, h5, h6]:
host.cmdPrint('cd /media/sf_DHT-Torrent')
h1.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 600 --ip ' + h1.IP() + ' \' > h1.sh')
h2.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 500 --ip ' + h2.IP() + " --nextpeerid 600 --nextpeerip " + h1.IP() + ' \' > h2.sh')
h3.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 400 --ip ' + h3.IP() + " --nextpeerid 500 --nextpeerip " + h2.IP() + ' \' > h3.sh')
h4.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 300 --ip ' + h4.IP() + " --nextpeerid 400 --nextpeerip " + h3.IP() + ' \' > h4.sh')
h5.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 200 --ip ' + h5.IP() + " --nextpeerid 300 --nextpeerip " + h4.IP() + ' \' > h5.sh')
h6.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 100 --ip ' + h6.IP() + " --nextpeerid 200 --nextpeerip " + h5.IP() + ' \' > h6.sh')
# h1.cmdPrint('ls')
net.startTerms()
CLI(net)
# CLI(net).do_xterm(h1)
net.stopXterms()
net.stop()
if __name__ == '__main__':
# Tell mininet to print useful information
setLogLevel('info')
simpleTest()
topos = { 'mytopo': SingleSwitchTopo }
# tests = { 'mytest': simpleTest } | null | null | null | null | [
0
] |
1,033 | 0e112ecfd4ccf762234dff564dd6f3987418dedd | # Start the HTML and Javascript code
print '''
<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["treemap"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
'''
print CountBugs('path/to/repo')
# Finish the HTML and Javascript
print '''
]);
// Create and draw the visualization.
var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));
tree.draw(data, {
maxDepth: 2,
minColor: 'YellowGreen',
midColor: 'LightGoldenRodYellow',
maxColor: 'Red',
headerHeight: 15,
fontColor: 'black',
showScale: true});
}
</script>
</head>
<body>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
</body>
</html>
'''
| null | null | null | null | [
0
] |
1,034 | ed66e8028d653cf6b7ea4703fef5a658665c48db | # -*- coding: utf-8 -*-
# DATE 2018-08-21
# AUTHER = tongzz
#
import MySQLdb
from Elements.LoginElements import *
import datetime
import sys
class Tradepasswd():
def __init__(self):
self.db_config={
'host': '172.28.38.59',
'usr': 'mysqladmin',
'passwd': '123465',
'port': '3306',
'db': 'hdb'
}
def tradePasswd(self):
try:
conn = MySQLdb.connect(host=self.db_config['host'],user=self.db_config['usr'],passwd=self.db_config['passwd'],db=self.db_config['db'])
conn.autocommit(True)
curr = conn.cursor()
curr.execute("SET NAMES utf8")
curr.execute("USE %s"% self.db_config['db'])
# print u"******************** 操作数据库对象成功 ********************"
# return conn,curr
tradepasswd_sql = "UPDATE member set trade_pwd = NULL where uname = " + username + ";"
curr.execute(tradepasswd_sql)
# curr.fetchone()
print u"恢复交易密码成功"
curr.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d:%s"%(e.args[0],e.args[1])
return tradepasswd_sql
| null | null | null | null | [
0
] |
1,035 | 81a1fbd13b06e4470bfbaa0d1716d5301e1a4b36 | def readint(): return int(raw_input())
T = readint()
for t in xrange(T):
N = int(raw_input())
res = 0
sum = 0
min = 1000000
for i in raw_input().split():
r = int(i)
res ^= r
sum += r
if min > r: min = r
if res == 0:
sum -= min
print "Case #%d: %s" % (t + 1, sum)
else:
print "Case #%d: NO" % (t + 1) | null | null | null | null | [
0
] |
1,036 | 90fc6e37e3988a2014c66913db61749509db2d53 | <mask token>
class Idea:
<mask token>
<mask token>
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
| <mask token>
class Idea:
def __init__(self, folder):
self.folder = folder
<mask token>
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
| <mask token>
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return 'jetbrains-idea'
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
| import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return 'jetbrains-idea'
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
| import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return "jetbrains-idea"
def cmd(self):
return "intellij-idea-ultimate-edition %s" % self.folder
| [
2,
3,
4,
5,
6
] |
1,037 | d2e3ac490ce5fdc20976567fa320a9e6a53cbe34 | <mask token>
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<mask token>
| <mask token>
print(__doc__)
<mask token>
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<mask token>
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
<mask token>
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
<mask token>
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| <mask token>
print(__doc__)
<mask token>
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
<mask token>
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
D = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(
u, v))
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
<mask token>
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| <mask token>
import scipy as sp
import numpy as np
from scipy import spatial
print(__doc__)
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
"""Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs"""
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0
) ** 2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
km = EARTHRADIUS * c
return km
import csv
import re
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"', ''))
gps_matrix[1][key] = float(row[1].replace('"', ''))
except:
a = float(row[1].replace(',', ''))
print('problem string to float')
D = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(
u, v))
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))
print('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))
print('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))
print('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(
labels_true, labels))
print('Adjusted Mutual Information: %0.3f' % metrics.
adjusted_mutual_info_score(labels_true, labels))
print('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))
import matplotlib.pyplot as plt
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
col = 'k'
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=
'k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
import scipy as sp
import numpy as np
from scipy import spatial
print(__doc__)
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Calcule Distance Haversine Methods
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
'''Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs'''
#
# "unpack" our numpy array, this extracts column wise arrays
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
#
# convert to radians ##### Completely identical
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
#
# haversine formula #### Same, but atan2 named arctan2 in numpy
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (np.sin(dlat/2))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0-a))
km = EARTHRADIUS * c
return km
##############################################################################
# Create a Matrix with longitude and latitude
import csv
import re
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[0 for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"',''))
gps_matrix[1][key] = float(row[1].replace('"',''))
except:
a = float(row[1].replace(',',''))
print('problem string to float')
##############################################################################
# Calculate the Distance matrix
D = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(u,v))
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| [
1,
2,
3,
4,
5
] |
1,038 | 166a1dfbd3baf766230080361d98648ec0a64455 | <mask token>
| <mask token>
print(parsed_json2)
| <mask token>
fake_header = {'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
, 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':
'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}
s = requests.Session()
r = s.get('https://xueqiu.com', headers=fake_header)
r = s.get(
'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'
, headers=fake_header)
parsed_json1 = r.json
parsed_json2 = json.loads(r.text)
print(parsed_json2)
| <mask token>
import json
import requests
fake_header = {'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
, 'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
, 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':
'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}
s = requests.Session()
r = s.get('https://xueqiu.com', headers=fake_header)
r = s.get(
'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'
, headers=fake_header)
parsed_json1 = r.json
parsed_json2 = json.loads(r.text)
print(parsed_json2)
| #coding=utf8
"""
Created on Thu Feb 20 00:53:28 2020
@author: Neal LONG
"""
import json
import requests
fake_header = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate, sdch",
"Accept-Language":"zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2"
}
s = requests.Session()
r=s.get('https://xueqiu.com',headers = fake_header)
r = s.get('https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea',headers = fake_header)
#print(r.text)
parsed_json1 = r.json
parsed_json2 = json.loads(r.text)
print(parsed_json2)
| [
0,
1,
2,
3,
4
] |
1,039 | 9ef5d57d536f5c88f705b1032cc0936e2d4cd565 | from Shapes import *
c1 = Circle(5)
r1 = Rectangle(3,2)
c2 = Circle(3)
c3 = Circle(1)
r2 = Rectangle(1,1)
listShapes = [c1,r1,c2,c3,r2]
for item in listShapes:
print(item.toString())
print("Area: " + str(item.area()))
print("Perimeter: " + str(item.perimeter()))
| null | null | null | null | [
0
] |
1,040 | 813d27e8f9c1a416dab2f891dd71e4791bb92dbb | <mask token>
| <mask token>
@pytest.mark.slow
@pytest.mark.skipif('flair' not in sys.modules, reason=
'requires the Flair library')
def test_flair_simple(small_dataset):
flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])
evaluator = Evaluator(model=flair_model)
evaluation_results = evaluator.evaluate_all(small_dataset)
scores = evaluator.calculate_score(evaluation_results)
assert_model_results_gt(scores, 'PERSON', 0)
| import sys
import pytest
from presidio_evaluator.evaluation import Evaluator
from tests.conftest import assert_model_results_gt
from presidio_evaluator.models.flair_model import FlairModel
@pytest.mark.slow
@pytest.mark.skipif('flair' not in sys.modules, reason=
'requires the Flair library')
def test_flair_simple(small_dataset):
flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])
evaluator = Evaluator(model=flair_model)
evaluation_results = evaluator.evaluate_all(small_dataset)
scores = evaluator.calculate_score(evaluation_results)
assert_model_results_gt(scores, 'PERSON', 0)
| import sys
import pytest
from presidio_evaluator.evaluation import Evaluator
from tests.conftest import assert_model_results_gt
from presidio_evaluator.models.flair_model import FlairModel
@pytest.mark.slow
@pytest.mark.skipif("flair" not in sys.modules, reason="requires the Flair library")
def test_flair_simple(small_dataset):
flair_model = FlairModel(model_path="ner", entities_to_keep=["PERSON"])
evaluator = Evaluator(model=flair_model)
evaluation_results = evaluator.evaluate_all(small_dataset)
scores = evaluator.calculate_score(evaluation_results)
assert_model_results_gt(scores, "PERSON", 0)
| null | [
0,
1,
2,
3
] |
1,041 | cceda9a8a0188499ae0aa588701bb8104b5ed313 | <mask token>
| <mask token>
sys.path.insert(0, '../utils')
<mask token>
for json_file in json_files_path_list:
current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.
get_file_name(json_file))
print(current_collection)
collection_list = db.collection_names()
if current_collection not in collection_list:
collection = db[current_collection]
collection.create_index([('geometry', GEOSPHERE)])
json_docs = json.load(open(json_file))
for doc in json_docs['features']:
collection.insert(doc)
print(db.collection_names())
<mask token>
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}}):
pprint.pprint(doc)
<mask token>
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'properties.time': 2009002}).limit(3):
pprint.pprint(doc)
<mask token>
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):
pprint.pprint(doc)
| <mask token>
sys.path.insert(0, '../utils')
<mask token>
client = MongoClient('localhost', 27017)
db = client['nfcdata']
json_files_path_list = path_functions.get_json_files(
'../../ftp-data/geojson-files/quikscat-l2b12')
for json_file in json_files_path_list:
current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.
get_file_name(json_file))
print(current_collection)
collection_list = db.collection_names()
if current_collection not in collection_list:
collection = db[current_collection]
collection.create_index([('geometry', GEOSPHERE)])
json_docs = json.load(open(json_file))
for doc in json_docs['features']:
collection.insert(doc)
print(db.collection_names())
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}}):
pprint.pprint(doc)
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'properties.time': 2009002}).limit(3):
pprint.pprint(doc)
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):
pprint.pprint(doc)
| from pymongo import MongoClient, GEOSPHERE, GEO2D
import os, sys, json, pprint
sys.path.insert(0, '../utils')
import path_functions
client = MongoClient('localhost', 27017)
db = client['nfcdata']
json_files_path_list = path_functions.get_json_files(
'../../ftp-data/geojson-files/quikscat-l2b12')
for json_file in json_files_path_list:
current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.
get_file_name(json_file))
print(current_collection)
collection_list = db.collection_names()
if current_collection not in collection_list:
collection = db[current_collection]
collection.create_index([('geometry', GEOSPHERE)])
json_docs = json.load(open(json_file))
for doc in json_docs['features']:
collection.insert(doc)
print(db.collection_names())
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}}):
pprint.pprint(doc)
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'properties.time': 2009002}).limit(3):
pprint.pprint(doc)
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {
'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [
10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):
pprint.pprint(doc)
|
from pymongo import MongoClient, GEOSPHERE, GEO2D
import os, sys, json, pprint
sys.path.insert(0, '../utils')
import path_functions
client = MongoClient( 'localhost', 27017 )
db = client[ 'nfcdata' ]
json_files_path_list = path_functions.get_json_files('../../ftp-data/geojson-files/quikscat-l2b12')
for json_file in json_files_path_list:
current_collection = 'GeoJSON-quikscat-l2b12-' + path_functions.get_file_name( json_file )
print(current_collection)
collection_list = db.collection_names()
if current_collection not in collection_list:
collection = db[current_collection]
collection.create_index([( "geometry", GEOSPHERE )])
json_docs = json.load( open( json_file ) )
for doc in json_docs['features']:
collection.insert( doc )
# -- DROP COLLECTIONS --
# collection_list = db.collection_names()
# for collection in collection_list:
# db.drop_collection(collection)
# -- PRINT COLLECTIONS --
print( db.collection_names() )
# # -- PRINT INDEXES --
# collection_list = db.collection_names()
# for current_collection in collection_list:
# collection = db[current_collection]
# print( 'Index: ', sorted( list( collection.index_information() ) ) )
# -- PRINT DATA --
# collection = db['GeoJSON-quikscat-l2b12-005']
# cursor = collection.find({})
# for document in cursor:
# print('\n - - - - - - - DOCUMENTO - - - - - - - \n')
# print(document)
# -- SPATIAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[ current_collection ]
for doc in collection.find(
{ "geometry": {
"$geoWithin": {
"$geometry" : {
"type": "Polygon" ,
"coordinates" : [
[
[-77.49, -89.70],
[0.00, 0.00],
[10.00, 10.00],
[-77.49, -89.70]
]
]
} } } } ):
pprint.pprint( doc )
# -- TEMPORAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[current_collection]
for doc in collection.find( { "properties.time": 2009002 } ).limit(3):
pprint.pprint(doc)
# -- TEMPORAL-SPATIAL QUERYING USING 2D INDEX
collection_list = db.collection_names()
for current_collection in collection_list:
collection = db[ current_collection ]
for doc in collection.find(
{ "geometry": {
"$geoWithin": {
"$geometry" : {
"type": "Polygon" ,
"coordinates" : [
[
[-77.49, -89.70],
[0.00, 0.00],
[10.00, 10.00],
[-77.49, -89.70]
]
]
} } }, "properties.time": 2009003 } ):
pprint.pprint( doc )
# collection = db['quikscat-l2b12-001']
# cursor = collection.find({})
# for document in cursor:
# pprint.pprint( document )
| [
0,
1,
2,
3,
4
] |
1,042 | 65d5cee6899b0b75474e3898459bf2cfa8b3635b | <mask token>
| <mask token>
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
<mask token>
| def solve(bt):
if len(bt) == n:
print(*bt, sep='')
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
<mask token>
| def solve(bt):
if len(bt) == n:
print(*bt, sep='')
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr) // 2 + 1):
if arr[-i:] == arr[-(i * 2):-i]:
return False
return True
if __name__ == '__main__':
n = int(input())
solve([1])
| def solve(bt):
if len(bt) == n:
print(*bt, sep="")
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr)//2+1):
if arr[-i:] == arr[-(i*2):-i]:
return False
return True
if __name__ == "__main__":
n = int(input())
solve([1]) | [
0,
1,
2,
3,
4
] |
1,043 | 6be285f9c48a20934c1846785232a73373c7d547 | ##armstrong number##
##n= int(input('enter a number '))
##a=n
##s=0
##
##while n>0:
## rem= n%10
## s= s+rem*rem*rem
## n= n//10
##if a==s:
## print(a,' is an armstrong number')
##else:
## print(a,' is not an armstrong number')
##palindrome or not##
##n= int(input('enter a number '))
##a=n
##rev=0
##
##while n>0:
## rem= n%10
## rev= rev*10+rem
## n= n//10
##if a==rev:
## print(a,' is a palindrome number')
##else:
## print(a,' is not a palindrome number')
##factorial of a number using while loop##
##n= int(input('enter a number '))
##i=1
##a=n
##fact=1
##
##while i<=n:##n>0
## fact*= n*i##fact*=n
## n-=1
##print(fact,' is the factorial of ',a)
##factorial of a number using for loop##
##n= int(input('enter a number '))
##a=n
##fact=1
##
##for i in range(1,n+1):##(n,0,-1)
## fact*=i
##print(fact,' is the factorial of ',a)
##harshed number ##
##n= int(input('enter a number '))
##a=n
##s=0
##
##while n>0:
## rem= n%10
## s+=rem
## n//=10
##print(s,' is the sum of ',a)
##if a%s==0:
## print(a,' is a harshed number')
##else:
## print(a,' is not a harshed number')
##fibonocci series using while loop##
##n= int(input('enter a range '))
##a=0
##b=1
##print(a,b,end=" ")
##count=3
##
##while count<=n:
## s= a+b
## print(s,end=" ")
## a=b
## b=s
## count+=1
##fibonocci series using for loop##
##n= int(input('enter a range '))
##a=0
##b=1
##print(a,b,end=' ')
##
##for count in range(1,n-1):##(2,n)
## s= a+b
## print(s,end=' ')
## a=b
## b=s
##previous number of fibnocci series from the given number##
##n= int(input('enter a number '))
##a=0
##b=1
##for i in range(3,n+1):
## s=a+b
## a=b
## b=s
## if b>=n:
## print(a)
## break
##next number of fibnocci series from the given number##
##n= int(input('enter a number '))
##a=0
##b=1
##for i in range(3,n+1):
## s=a+b
## a=b
## b=s
## if b>=n:
## print(b)
## break
##perfect numbers using for loop##
##n= int(input('enter a number '))
##a=n
##s=0
##
##for i in range(1,n):#(1,(n//2)+1)#
## div=n%i
## if n%i==0:
## s+=i
## else:
## continue
##
##if s==a:
## print(a,' is a perfect number')
##else:
## print(a,' is not a perfect number')
##perfect numbers using while loop##
##n= int(input('enter a number '))
##a=n
##s=0
##i=1
##
##while i<n:
## if n%i==0:
## s+=i
## i+=1
##
##if s==a:
## print(a,' is a perfect number')
##else:
## print(a,' is not a perfect number')
| null | null | null | null | [
1
] |
1,044 | 3908d303d0e41677aae332fbdbe9b681bffe5391 | <mask token>
| <mask token>
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = (
f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
| import os
from datetime import timedelta
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = (
f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
| import os
from datetime import timedelta
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
# SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(
# os.path.join(ROOT_PATH, 's_shop_flask.db'))
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}'
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
# 图片上传路径
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
| null | [
0,
1,
2,
3
] |
1,045 | 874b87ca20385aa15cc7299707c9c1c0360ace43 | <mask token>
| <mask token>
SceneName = 'sphere'
DefaultColor = QtCore.Qt.yellow
| from PyQt4 import QtCore
SceneName = 'sphere'
DefaultColor = QtCore.Qt.yellow
| from PyQt4 import QtCore
SceneName = "sphere"
DefaultColor = QtCore.Qt.yellow
| null | [
0,
1,
2,
3
] |
1,046 | 86fdea2ae8e253aa4639bb3114de70c693536760 | <mask token>
class Prestamo(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
| <mask token>
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
| <mask token>
class Libros(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto = models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
| <mask token>
class Libros(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto = models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo(models.Model):
Fecha_Prestamo = models.DateTimeField(default=timezone.now)
Fecha_Devolucion = models.DateField()
Fecha_Devolucion_Real = models.DateField()
Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)
Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model = Prestamo
extra = 1
class LibroAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
class UsuarioAdmin(admin.ModelAdmin):
inlines = PrestamoInLine,
| from django.db import models
from django.contrib import admin
from django.utils import timezone
class Libros(models.Model):
ISBN = models.CharField(max_length=13,primary_key=True)
Titulo = models.CharField(max_length=15)
# Portada = models.ImageField(upload_to='imagen/')
Autor = models.CharField(max_length=100)
Editorial = models.CharField(max_length=100)
Pais=models.CharField(max_length=100)
anno= models.IntegerField()
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto= models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo (models.Model):
Fecha_Prestamo=models.DateTimeField(default=timezone.now)
Fecha_Devolucion=models.DateField()
Fecha_Devolucion_Real=models.DateField()
Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)
Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model=Prestamo
extra=1
class LibroAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
class UsuarioAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
| [
7,
8,
12,
13,
16
] |
1,047 | ca6b064dbd8200c49665eaa944fdf1fc80c25726 | <mask token>
| <mask token>
print(data)
<mask token>
print(mx, my)
<mask token>
for i in range(len(x)):
num += (x[i] - mx) * (y[i] - my)
den += (x[i] - mx) ** 2
<mask token>
print(beta1, beta0)
<mask token>
plt.scatter(x, y)
plt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')
plt.show()
<mask token>
for i in range(len(x)):
xdata = beta1 * x[i] + beta0
ycap.append(xdata)
print(ycap)
<mask token>
for i in range(len(y)):
l = y[i] - ycap[i]
residue.append(l)
print(residue)
<mask token>
print(residualsum)
| <mask token>
data = pd.read_csv('regression.csv')
print(data)
x = data.iloc[:, 0]
y = data.iloc[:, 1]
mx = data['X1'].mean()
my = data['Y'].mean()
print(mx, my)
num, den = 0, 0
for i in range(len(x)):
num += (x[i] - mx) * (y[i] - my)
den += (x[i] - mx) ** 2
beta1 = num / den
beta0 = my - beta1 * mx
print(beta1, beta0)
Y_predict = beta1 * x + beta0
plt.scatter(x, y)
plt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')
plt.show()
ycap = []
for i in range(len(x)):
xdata = beta1 * x[i] + beta0
ycap.append(xdata)
print(ycap)
residue = []
for i in range(len(y)):
l = y[i] - ycap[i]
residue.append(l)
print(residue)
residualsum = sum(residue)
print(residualsum)
| import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('regression.csv')
print(data)
x = data.iloc[:, 0]
y = data.iloc[:, 1]
mx = data['X1'].mean()
my = data['Y'].mean()
print(mx, my)
num, den = 0, 0
for i in range(len(x)):
num += (x[i] - mx) * (y[i] - my)
den += (x[i] - mx) ** 2
beta1 = num / den
beta0 = my - beta1 * mx
print(beta1, beta0)
Y_predict = beta1 * x + beta0
plt.scatter(x, y)
plt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')
plt.show()
ycap = []
for i in range(len(x)):
xdata = beta1 * x[i] + beta0
ycap.append(xdata)
print(ycap)
residue = []
for i in range(len(y)):
l = y[i] - ycap[i]
residue.append(l)
print(residue)
residualsum = sum(residue)
print(residualsum)
| import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_csv('regression.csv')
print(data)
x=data.iloc[:,0]
y=data.iloc[:,1]
mx=data['X1'].mean()
my=data['Y'].mean()
print(mx,my)
num, den = 0,0
for i in range(len(x)):
num += (x[i] - mx)*(y[i]-my)
den += (x[i]-mx)**2
beta1 = num/den
beta0 =my-(beta1*mx)
print(beta1,beta0)
Y_predict=beta1*x + beta0
plt.scatter(x,y)
plt.plot([min(x),max(x)],[min(Y_predict),max(Y_predict)], color='red')
plt.show()
ycap = []
for i in range(len(x)):
xdata =( beta1*x[i])+ beta0
ycap.append(xdata)
print(ycap)
residue=[]
for i in range(len(y)):
l = y[i] - ycap[i]
residue.append(l)
print(residue)
residualsum=sum(residue)
print(residualsum)
| [
0,
1,
2,
3,
4
] |
1,048 | 5c415d5bf9d6952863a662d300cb1f706ef02a8f | <mask token>
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid,
name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':
_('COURSE LIST')})
<mask token>
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in
range(0, len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
<mask token>
| <mask token>
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid,
name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':
_('COURSE LIST')})
<mask token>
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in
range(0, len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls',
'openacademy.course', parser=openacademy_course_xls_parser)
| <mask token>
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid,
name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':
_('COURSE LIST')})
_column_sizes = [('0', 30), ('1', 30), ('2', 20)]
<mask token>
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in
range(0, len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls',
'openacademy.course', parser=openacademy_course_xls_parser)
| import openerp
from openerp import pooler
from openerp.report import report_sxw
import xlwt
from openerp.addons.report_xls.report_xls import report_xls
from openerp.tools.translate import _
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid,
name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':
_('COURSE LIST')})
_column_sizes = [('0', 30), ('1', 30), ('2', 20)]
import time
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in
range(0, len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls',
'openacademy.course', parser=openacademy_course_xls_parser)
| import openerp
from openerp import pooler
from openerp.report import report_sxw
import xlwt
from openerp.addons.report_xls.report_xls import report_xls
from openerp.tools.translate import _
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('COURSE LIST'),
})
_column_sizes = [
('0',30),
('1',30),
('2',20)
]
import time
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
#write empty to define column
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in range(0,len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title',3,0,'text','Subject: %s' %(so.name)),]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls','openacademy.course', parser=openacademy_course_xls_parser) | [
5,
6,
7,
8,
9
] |
1,049 | 88590aef975f7e473ef964ee0c4004cff7e24b07 | <mask token>
| <mask token>
if __name__ == '__main__':
fs = open('./src/keywords.txt', 'rb')
keywords = fs.read().decode('utf-8').split(',')
fs.close()
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features['contains %s' % word] = word in words
return features
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
regex = re.compile('[一-龥]')
p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',
version='%prog 0.1', prog='url-tagger')
p.add_option('--url', '-u', help='Your url')
p.add_option('--file', '-f', help='Your url file. One line one url')
options, arguments = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print('%s: %s' % (key, value))
if key is 'url':
url_list.append(value)
else:
url_file = open(value, 'rb+')
for line in url_file.readlines():
url_list.append(str(line, encoding='utf-8').strip())
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
html = str(response.content, encoding=response.apparent_encoding,
errors='ignore')
soup = BeautifulSoup(html, 'lxml')
for script in soup(['script', 'style']):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split
(' '))
text = ''.join(chunk for chunk in chunks if chunk)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),
url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
results = [(url, classifier.classify(find_features(jieba.lcut(''.join(
regex.findall(data)))))) for url, data in data_list]
for url, category in results:
print('%s: %s' % (url, category))
| import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio
if __name__ == '__main__':
fs = open('./src/keywords.txt', 'rb')
keywords = fs.read().decode('utf-8').split(',')
fs.close()
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features['contains %s' % word] = word in words
return features
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
regex = re.compile('[一-龥]')
p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',
version='%prog 0.1', prog='url-tagger')
p.add_option('--url', '-u', help='Your url')
p.add_option('--file', '-f', help='Your url file. One line one url')
options, arguments = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print('%s: %s' % (key, value))
if key is 'url':
url_list.append(value)
else:
url_file = open(value, 'rb+')
for line in url_file.readlines():
url_list.append(str(line, encoding='utf-8').strip())
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
html = str(response.content, encoding=response.apparent_encoding,
errors='ignore')
soup = BeautifulSoup(html, 'lxml')
for script in soup(['script', 'style']):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split
(' '))
text = ''.join(chunk for chunk in chunks if chunk)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),
url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
results = [(url, classifier.classify(find_features(jieba.lcut(''.join(
regex.findall(data)))))) for url, data in data_list]
for url, category in results:
print('%s: %s' % (url, category))
| #!/usr/bin/env python3
import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio
if __name__ == '__main__':
# 读取10000个关键词
fs = open("./src/keywords.txt", "rb")
keywords = fs.read().decode("utf-8").split(",")
fs.close()
# 找出特征
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features["contains %s" % word] = (word in words)
return features
# 读取预先做好的nltk分词器
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
# 匹配中文字符
regex = re.compile("[\u4e00-\u9fa5]")
p = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2", version="%prog 0.1", prog="url-tagger")
p.add_option("--url", "-u", help="Your url")
p.add_option("--file", "-f", help="Your url file. One line one url")
(options, arguments) = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print("%s: %s" % (key, value))
if key is "url":
url_list.append(value)
else:
url_file = open(value, "rb+")
for line in url_file.readlines():
url_list.append(str(line, encoding="utf-8").strip())
# 异步发起http请求
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
# print(response.apparent_encoding)
html = str(response.content, encoding=response.apparent_encoding, errors="ignore")
soup = BeautifulSoup(html, "lxml")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "".join(chunk for chunk in chunks if chunk)
# print(text)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
# 分类器进行分类
results = [(url, classifier.classify(find_features(jieba.lcut("".join(regex.findall(data)))))) for (url, data)
in data_list]
# 打印结果
for (url, category) in results:
print("%s: %s" % (url, category))
| null | [
0,
1,
2,
3
] |
1,050 | 7fe7ea89908f9d233dbdb9e46bf2d677406ab324 | <mask token>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
<mask token>
| <mask token>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
<mask token>
| <mask token>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
def test_nx_copy_global_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
g.get_global()['f'] = 8
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def global_transform(g):
for _, gdata in g:
gdata['x'] = 4
yield _, gdata
g2 = nx_copy(g, None, global_transform=global_transform)
assert g2.get_global() == {'x': 4, 'f': 8}
| import networkx as nx
import pytest
from caldera.utils.nx import nx_copy
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
def test_nx_copy_global_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
g.get_global()['f'] = 8
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def global_transform(g):
for _, gdata in g:
gdata['x'] = 4
yield _, gdata
g2 = nx_copy(g, None, global_transform=global_transform)
assert g2.get_global() == {'x': 4, 'f': 8}
| import networkx as nx
import pytest
from caldera.utils.nx import nx_copy
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {"x": 5}
assert g2.edges[(1, 2)] == {"y": 6}
assert g2.edges[(2, 3)] == {"z": []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[(2, 3)] is not g1.edges[(2, 3)]
@pytest.mark.parametrize("do_deepcopy", [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[(2, 3)]["z"] is g.edges[(2, 3)]["z"]) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(
nx.OrderedDiGraph, nx.Graph
)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert "1" in g2
assert "2" in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges[("1", "2")] == {"f": 4}
assert g2.edges[("2", "3")] == {"f": 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {"f": 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[(1, 2)] == {"f": 8}
assert g2.edges[(2, 3)] == {"f": 8}
def test_nx_copy_global_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
g.get_global()["f"] = 8
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def global_transform(g):
for _, gdata in g:
gdata["x"] = 4
yield _, gdata
g2 = nx_copy(g, None, global_transform=global_transform)
assert g2.get_global() == {"x": 4, "f": 8}
| [
6,
7,
8,
9,
10
] |
1,051 | 7cb75195df567a5b65fe2385423b0082f3b9de4b | <mask token>
class GameMap(list):
<mask token>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
<mask token>
<mask token>
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <[email protected]>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
<mask token>
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
<mask token>
<mask token>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <[email protected]>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<mask token>
| <mask token>
class GameMap(list):
<mask token>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
<mask token>
<mask token>
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <[email protected]>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
<mask token>
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1
] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j
].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[
1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
<mask token>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <[email protected]>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<mask token>
| <mask token>
class GameMap(list):
<mask token>
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
<mask token>
@nb_rhinoceros.setter
def nb_rhinoceros(self, x):
"""
Setting the rhinoceros's number.
.. warning:: the number of rhinoceros can't be changed by hand.
"""
print('Warning ! Changing the number of Rhinoceros is not possible!')
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <[email protected]>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
def delete(self, animal):
"""
This method removes an animal from the board
It reduces by one the number of animals of that species
:Args:
:param animal (Animal): the animal to delete.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.delete(a)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: if removal of a boulder, game ends?
.. warning:: error if piece is not on the edge
"""
x, y = animal.coords
if x == 0 or x == 4 or y == 0 or y == 4:
self[x][y] = 0
if animal.species == 'Elephant':
self.__nb_elephants -= 1
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1
] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j
].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[
1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
<mask token>
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <[email protected]>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
<mask token>
| <mask token>
class GameMap(list):
"""
The Gamemap module
==================
Creating the Gamemap.
This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.
:Example:
>>> m = GameMap()
.. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`
.. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>
"""
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = 'Elephant'
self.winner = ''
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3):
self[2][1 + k] = Boulder(2, 1 + k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
@property
def nb_rhinoceros(self):
"""
This is the number of rinoceros on the gamemap.
:Getter: Return the number of rhinoceros on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> nr = m.nb_rhinoceros
.. note:: The rhinoceros's number can not exceed 5.
.. warning:: the number of rhinoceros can't be changed by hand.
"""
return self.__nb_rhinoceros
@nb_rhinoceros.setter
def nb_rhinoceros(self, x):
"""
Setting the rhinoceros's number.
.. warning:: the number of rhinoceros can't be changed by hand.
"""
print('Warning ! Changing the number of Rhinoceros is not possible!')
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <[email protected]>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==
0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = 'Rhinoceros'
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (
x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = 'Elephant'
else:
return False
def delete(self, animal):
"""
This method removes an animal from the board
It reduces by one the number of animals of that species
:Args:
:param animal (Animal): the animal to delete.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.delete(a)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: if removal of a boulder, game ends?
.. warning:: error if piece is not on the edge
"""
x, y = animal.coords
if x == 0 or x == 4 or y == 0 or y == 4:
self[x][y] = 0
if animal.species == 'Elephant':
self.__nb_elephants -= 1
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def push_counter(self, x, y, cx, cy, counter=1, k=0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
nx, ny = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) and (animal.direction[0] == cx and animal.
direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==
-1 or y + i * cy == 5):
if isinstance(self[x + (i - 1) * cx][y + (i - 1) *
cy], Animal):
self[x + (i - 1) * cx][y + (i - 1) * cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y +
(i - 1 - k) * cy], Animal) and [self[x +
(i - 1 - k) * cx][y + (i - 1 - k) * cy]
.direction[0], self[x + (i - 1 - k) *
cx][y + (i - 1 - k) * cy].direction[1]
] == [cx, cy]:
self.winner = self[x + (i - 1 - k) * cx][
y + (i - 1 - k) * cy].species
print('winner is', self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][
y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy
].coords = x + i * cx, y + i * cy
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
print('Push not possible')
return False
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) ==
1 and cy == 0) or cx == 0 and cy == 0:
animal.coords = nx, ny
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == 'Elephant':
self.playerTurn = 'Rhinoceros'
elif self.playerTurn == 'Rhinoceros':
self.playerTurn = 'Elephant'
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1
] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j
].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[
1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
def save(self, file):
"""
This method save a GameMap in a KingOfSiam file with the .kos extension.
:Args:
:param file (file object): is file in which to write.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: this method take in argument a file object.
"""
boulders = []
elephants = []
rhinos = []
for i in range(5):
for j in range(5):
if self[i][j] != 0:
piece = self[i][j]
L = []
if not isinstance(self[i][j], Boulder):
L.append(self[i][j].direction[0])
L.append(self[i][j].direction[1])
if piece.species == 'Elephant':
elephants.append('(' + str(i) + ',' + str(j) +
') : np.array([' + str(L[0]) + ',' + str(L[1]) +
'])')
elif piece.species == 'Rhinoceros':
rhinos.append('(' + str(i) + ',' + str(j) +
') : np.array([' + str(L[0]) + ',' + str(L[1]) +
'])')
elif isinstance(piece, Boulder):
boulders.append('(' + str(i) + ',' + str(j) + ')')
file.write('# King of Siam GameFile \n\nplayer_turn {\n ' + self
.playerTurn + '\n}\n\n')
file.write('Boulder {')
for k in range(len(boulders)):
file.write('\n ' + boulders[k] + ';')
file.write('\n}\n\nElephant {')
for elt in elephants:
file.write('\n ' + elt + ';')
file.write('\n}\n\nRhinoceros {')
for elt in rhinos:
file.write('\n ' + elt + ';')
file.write('\n}')
file.close()
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <[email protected]>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and 'Boulder {' not in f[k]:
k += 1
k += 1
while ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and 'Elephant {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and 'Rhinoceros {' not in f[k]:
k += 1
k += 1
while ':' in f[k] and ';' in f[k]:
coords = f[k][5:8].split(',')
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split(']')[0].split(',')
xdir, ydir = 0, 0
if d[0] == '1':
xdir = 1
elif d[0] == '-1':
xdir = -1
if d[1] == '1':
ydir = 1
elif d[1] == '-1':
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
if __name__ == '__main__':
g = GameMap()
print(g)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Brateaqu, Farolflu"
__copyright__ = "Copyright 2019"
__credits__ = ["Quentin BRATEAU", "Luca FAROLFI"]
__license__ = "GPL"
__version__ = "1.0"
__email__ = ["[email protected]", "[email protected]"]
# Importing modules
import numpy as np
from GamePieces import Animal, Boulder
class GameMap(list):
"""
The Gamemap module
==================
Creating the Gamemap.
This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.
:Example:
>>> m = GameMap()
.. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`
.. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>
"""
def __init__(self):
super().__init__()
self.xmax = 5
self.ymax = 5
self.__nb_elephants = 0
self.__nb_rhinoceros = 0
self.nb_boulders = 0
self.nb_crosses = 0
self.playerTurn = "Elephant"
self.winner = ""
for k in range(self.ymax):
y = []
for i in range(self.ymax):
y.append(0)
self.append(y)
for k in range(3): # Setting up the 3 Boulders
self[2][1+k] = Boulder(2, 1+k)
self.nb_boulders += 1
@property
def nb_elephants(self):
"""
This is the number of elephant on the gamemap.
:Getter: Return the number of elephant on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> ne = m.nb_elephants
.. note:: The elephant's number can not exceed 5.
.. warning:: the number of elephant can't be changed by hand.
"""
return self.__nb_elephants
@nb_elephants.setter
def nb_elephants(self, x):
"""
Setting the elephant's number.
.. warning:: the number of elephant can't be changed by hand.
"""
print('Warning ! Changing the number of Elephant is not possible!')
@property
def nb_rhinoceros(self):
"""
This is the number of rinoceros on the gamemap.
:Getter: Return the number of rhinoceros on the gamemap.
:Type: int
:Getter's example:
>>> m = GameMap()
>>> nr = m.nb_rhinoceros
.. note:: The rhinoceros's number can not exceed 5.
.. warning:: the number of rhinoceros can't be changed by hand.
"""
return self.__nb_rhinoceros
@nb_rhinoceros.setter
def nb_rhinoceros(self, x):
"""
Setting the rhinoceros's number.
.. warning:: the number of rhinoceros can't be changed by hand.
"""
print('Warning ! Changing the number of Rhinoceros is not possible!')
def add(self, animal):
"""
This method add a new animal onto the board, with position and orientation
It returns whether the placement was possible or not.
:Args:
:param animal (Animal): the animal to add on the GameMap.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.add(a)
.. note:: the turn does not count if the insertion was not possible
.. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see
if insertion is possible
.. sectionauthor:: Quentin BRATEAU <[email protected]>
"""
x, y = animal.coords
if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_elephants += 1
self.playerTurn = "Rhinoceros"
elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
self[x][y] = animal
self.__nb_rhinoceros += 1
self.playerTurn = "Elephant"
else:
return False
def delete(self, animal):
"""
This method removes an animal from the board
It reduces by one the number of animals of that species
:Args:
:param animal (Animal): the animal to delete.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.delete(a)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: if removal of a boulder, game ends?
.. warning:: error if piece is not on the edge
"""
x, y = animal.coords
if x == 0 or x == 4 or y == 0 or y == 4:
self[x][y] = 0
if animal.species == 'Elephant':
self.__nb_elephants -= 1
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
if self.playerTurn == "Elephant":
self.playerTurn = "Rhinoceros"
elif self.playerTurn == "Rhinoceros":
self.playerTurn = "Elephant"
else:
return False
def push_counter(self, x, y, cx, cy, counter = 1, k = 0):
"""
This recursive method determines if a push move is possible by counting the elements having to be pushed,
and taking into account their orientation.
It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.
:Args:
:param x (int): is the abscissa of the current GamePiece,
:param y (int): is the ordinate of the current GamePiece,
:param cx (int): the direction of the move following the x-axis,
:param cy (int): the direction of the move following the y-axis,
:param counter (int): the sum of the scalar product of each animals in a row,
:param k (int): the counter of pawns in a row.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.push_counter(0, 1, 1, 0)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
.. warning:: ...
.. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.
"""
k += 1
if not (0 <= (x+cx) <= 4 and 0 <= y+cy <= 4):
return counter, k
elif self[x + cx][y + cy] == 0:
return counter, k
elif isinstance(self[x + cx][y + cy], Animal):
if self[x + cx][y + cy].direction @ + np.array([cx, cy]) == 1:
counter += 1
elif self[x + cx][y + cy].direction @ + np.array([cx, cy]) == -1:
counter -= 2
elif isinstance(self[x + cx][y + cy], Boulder):
counter -= 1
return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
def move(self, animal, ncoords, ndir):
"""
This method moves an animal from on the board, as well as turns it
If the coords to which the animal is moving are taken, the the animal pushes
:Args:
:param animal (Animal): the animal to move,
:param ncoords (tuple): the new coordinates of the animal,
:param ndir (np.array): the new direction of the animal.
:Example:
>>> a = Animal(0, 1, np.array([0,1]), "Elephant")
>>> g = GameMap()
>>> g.move(a,(1,1),np.array([0,1]))
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: player turn does not change if move is not possible
.. warning:: ...
.. info:: it is possible to both rotate and move to another position in the same turn
"""
x, y = animal.coords
(nx, ny) = ncoords
cx, cy = nx - x, ny - y
if abs(cx) > 1 or abs(cy) > 1:
return False
elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) and (animal.direction[0] == cx and animal.direction[1] == cy):
res = self.push_counter(x, y, cx, cy, 1)
c = res[0]
k = res[1]
if c >= 0:
for i in range(k, 0, -1):
if (x + i * cx) == -1 or (x + i * cx) == 5 or (y + i * cy) == -1 or (y + i * cy) == 5:
if isinstance(self[x + (i-1)*cx][y + (i-1)*cy], Animal):
self[x + (i-1)*cx][y + (i-1)*cy] = animal
if animal.species == 'Elephant':
self.__nb_elephants -= 1
self[x + (i-1)*cx][y + (i-1)*cy] = 0
elif animal.species == 'Rhinoceros':
self.__nb_rhinoceros -= 1
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
else:
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
for k in range(5):
if isinstance(self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy], Animal) and [self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[0], self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[1]] == [cx, cy]:
self.winner=self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].species
print("winner is", self.winner)
break
else:
self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][y + (i - 1) * cy]
self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
self[x + i * cx][y + i * cy].coords = (x + i * cx, y + i * cy)
if self.playerTurn == "Elephant":
self.playerTurn = "Rhinoceros"
elif self.playerTurn == "Rhinoceros":
self.playerTurn = "Elephant"
else:
print("Push not possible")
return (False)
elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) or (cx == 0 and cy == 0):
animal.coords = (nx, ny)
animal.direction = ndir
self[x][y] = 0
self[nx][ny] = animal
if self.playerTurn == "Elephant":
self.playerTurn = "Rhinoceros"
elif self.playerTurn == "Rhinoceros":
self.playerTurn = "Elephant"
else:
return False
def __str__(self):
"""
Show the current state of the game board
:return: the string with the characteristics of the board
:rtype: str
"""
s = ''
for i in range(5):
for j in range(5):
ani = False
if self[i][j] == 0:
s += ' 0 '
elif self[i][j].species == 'Elephant':
s += ' E'
ani = True
elif self[i][j].species == 'Rhinoceros':
s += ' R'
ani = True
else:
s += ' B '
if ani:
if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:
d = '> '
elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:
d = '∧ '
elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:
d = '< '
else:
d = '∨ '
s += d
s += '\n \n'
return s
def save(self, file):
"""
This method save a GameMap in a KingOfSiam file with the .kos extension.
:Args:
:param file (file object): is file in which to write.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Luca FAROLFI <[email protected]>
.. note:: this method take in argument a file object.
"""
boulders = []
elephants = []
rhinos = []
for i in range(5):
for j in range(5):
if self[i][j]!= 0:
piece = self[i][j]
L = []
if not isinstance(self[i][j], Boulder):
L.append(self[i][j].direction[0])
L.append(self[i][j].direction[1])
if piece.species == "Elephant":
elephants.append("(" + str(i) + "," + str(j)+ ") : np.array(["+str(L[0])+ "," + str(L[1])+"])")
elif piece.species == "Rhinoceros":
rhinos.append("("+str(i)+"," +str(j)+ ") : np.array(["+str(L[0]) + "," + str(L[1])+"])")
elif isinstance(piece, Boulder):
boulders.append("(" + str(i) + "," + str(j) + ")")
file.write("# King of Siam GameFile \n\nplayer_turn {\n " + self.playerTurn + "\n}\n\n")
file.write("Boulder {")
for k in range(len(boulders)):
file.write("\n " + boulders[k] + ";")
file.write("\n}\n\nElephant {")
for elt in elephants:
file.write("\n " + elt + ";")
file.write("\n}\n\nRhinoceros {")
for elt in rhinos:
file.write("\n " + elt + ";")
file.write("\n}")
file.close()
def load(self, file):
"""
This method load a KingOfSiam file with the .kos extension in a GameMap object.
:Args:
:param file (file object): is file to load.
:Example:
>>> g = GameMap()
>>> file = open('save.kos', 'r')
>>> g.load(file)
.. sectionauthor:: Quentin BRATEAU <[email protected]>
.. note:: this method take in argument a file object.
"""
for i in range(5):
for j in range(5):
self[i][j] = 0
f = file.readlines()
k = 0
while k < len(f) and "Boulder {" not in f[k]:
k += 1
k += 1
while ";" in f[k]:
coords = f[k][5:8].split(",")
x, y = int(coords[0]), int(coords[1])
self[x][y] = Boulder(x, y)
k += 1
while k < len(f) and "Elephant {" not in f[k]:
k += 1
k += 1
while ":" in f[k] and ";" in f[k]:
coords = f[k][5:8].split(",")
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split("]")[0].split(",")
xdir, ydir = 0, 0
if d[0] == "1":
xdir = 1
elif d[0] == "-1":
xdir = -1
if d[1] == "1":
ydir = 1
elif d[1] == "-1":
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Elephant')
k += 1
while k < len(f) and "Rhinoceros {" not in f[k]:
k += 1
k += 1
while ":" in f[k] and ";" in f[k]:
coords = f[k][5:8].split(",")
x, y = int(coords[0]), int(coords[1])
d = f[k][22:].split("]")[0].split(",")
xdir, ydir = 0, 0
if d[0] == "1":
xdir = 1
elif d[0] == "-1":
xdir = -1
if d[1] == "1":
ydir = 1
elif d[1] == "-1":
ydir = -1
direction = np.array([xdir, ydir])
self[x][y] = Animal(x, y, direction, 'Rhinoceros')
k += 1
file.close()
if __name__ == '__main__':
g = GameMap()
print(g) | [
8,
9,
11,
15,
18
] |
1,052 | fecaf41152e8c98784585abfdb3777fc0a4824f3 | <mask token>
| <mask token>
print(string1 == string2)
print(string1 != string2)
if string1.lower() == string2.lower():
print('The strings are equal')
else:
print('The strings are not equal')
<mask token>
if number1 <= number2:
print('number 1 is greater')
<mask token>
if name_1.lower() == name_2.lower() and number_1 < number_2:
print('We passed the test')
if name_1.lower() == name_2.lower() or number_1 < number_2:
print('We passed the test')
| string1 = 'Vegetable'
string2 = 'vegetable'
print(string1 == string2)
print(string1 != string2)
if string1.lower() == string2.lower():
print('The strings are equal')
else:
print('The strings are not equal')
number1 = 25
number2 = 30
if number1 <= number2:
print('number 1 is greater')
name_1 = 'Stephen'
name_2 = 'stephen'
number_1 = 45
number_2 = 30
if name_1.lower() == name_2.lower() and number_1 < number_2:
print('We passed the test')
if name_1.lower() == name_2.lower() or number_1 < number_2:
print('We passed the test')
|
string1 = "Vegetable"
#string2 = "Fruit"
string2 = "vegetable"
print(string1 == string2)
print(string1 != string2)
if string1.lower() == string2.lower():
print("The strings are equal")
else:
print("The strings are not equal")
number1 = 25
number2 = 30
# ==
# !=
# >
# <
# >=
# <=
if number1 <= number2:
print("number 1 is greater")
name_1 = "Stephen"
name_2 = "stephen"
number_1 = 45
number_2 = 30
if name_1.lower() == name_2.lower() and number_1 < number_2:
print("We passed the test")
if name_1.lower() == name_2.lower() or number_1 < number_2:
print("We passed the test") | null | [
0,
1,
2,
3
] |
1,053 | 6bc400896c004f0fdddbbd3dd73ef9aaa19eb4db | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = []
operations = [migrations.CreateModel(name='Customer', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created', models.DateTimeField(
verbose_name='Date de création', auto_now_add=True)), ('modified',
models.DateTimeField(verbose_name='Date de modification', auto_now=
True)), ('corporate_name', models.CharField(verbose_name='Nom',
max_length=255))], options={'abstract': False, 'ordering': (
'-created',)})]
| from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = []
operations = [migrations.CreateModel(name='Customer', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created', models.DateTimeField(
verbose_name='Date de création', auto_now_add=True)), ('modified',
models.DateTimeField(verbose_name='Date de modification', auto_now=
True)), ('corporate_name', models.CharField(verbose_name='Nom',
max_length=255))], options={'abstract': False, 'ordering': (
'-created',)})]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='Date de création', auto_now_add=True)),
('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),
('corporate_name', models.CharField(verbose_name='Nom', max_length=255)),
],
options={
'abstract': False,
'ordering': ('-created',),
},
),
]
| [
0,
1,
2,
3,
4
] |
1,054 | 513a2bbcf7a63baf900b73b18cf25618937dc7d0 | <mask token>
| <mask token>
print('Hello, world!')
| """
Prog: helloworld.py
Name: Samuel doyle
Date: 18/04/18
Desc: My first program!
"""
print('Hello, world!')
| null | null | [
0,
1,
2
] |
1,055 | a21ac29911931bb71460175cba584e0011fa2ece | <mask token>
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(
os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent = i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()
html = f""" <html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart('alternative')
message['Subject'] = 'inline embedding'
message['From'] = sender_email
message['To'] = receiver_email
part = MIMEText(html, 'html')
message.attach(part)
with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:
server.login(login, password)
server.sendmail(sender_email, receiver_email, message.as_string())
print('Sent')
return
| <mask token>
def timeFromLastSent():
if last_sent is None:
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(
os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent = i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()
html = f""" <html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart('alternative')
message['Subject'] = 'inline embedding'
message['From'] = sender_email
message['To'] = receiver_email
part = MIMEText(html, 'html')
message.attach(part)
with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:
server.login(login, password)
server.sendmail(sender_email, receiver_email, message.as_string())
print('Sent')
return
| <mask token>
config = configobj.ConfigObj('.env')
port = 2525
smtp_server = 'smtp.mailtrap.io'
login = config['SMTP_USERNAME']
password = config['SMTP_PASSWORD']
sender_email = '[email protected]'
receiver_email = '[email protected]'
last_sent = datetime.datetime.now()
last_index_sent = 0
def timeFromLastSent():
if last_sent is None:
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(
os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent = i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()
html = f""" <html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart('alternative')
message['Subject'] = 'inline embedding'
message['From'] = sender_email
message['To'] = receiver_email
part = MIMEText(html, 'html')
message.attach(part)
with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:
server.login(login, password)
server.sendmail(sender_email, receiver_email, message.as_string())
print('Sent')
return
| import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import configobj
import datetime
import os
config = configobj.ConfigObj('.env')
port = 2525
smtp_server = 'smtp.mailtrap.io'
login = config['SMTP_USERNAME']
password = config['SMTP_PASSWORD']
sender_email = '[email protected]'
receiver_email = '[email protected]'
last_sent = datetime.datetime.now()
last_index_sent = 0
def timeFromLastSent():
if last_sent is None:
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(
os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent = i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()
html = f""" <html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart('alternative')
message['Subject'] = 'inline embedding'
message['From'] = sender_email
message['To'] = receiver_email
part = MIMEText(html, 'html')
message.attach(part)
with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:
server.login(login, password)
server.sendmail(sender_email, receiver_email, message.as_string())
print('Sent')
return
| import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import configobj
import datetime
import os
config = configobj.ConfigObj('.env')
port = 2525
smtp_server = "smtp.mailtrap.io"
login = config['SMTP_USERNAME']
password = config['SMTP_PASSWORD']
sender_email = "[email protected]"
receiver_email = "[email protected]"
last_sent = datetime.datetime.now()
last_index_sent = 0
def timeFromLastSent():
if(last_sent is None):
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
# send your email
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent=i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open("frame.jpg", "rb").read()).decode()
html = f"""\
<html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart("alternative")
message["Subject"] = "inline embedding"
message["From"] = sender_email
message["To"] = receiver_email
part = MIMEText(html, "html")
message.attach(part)
with smtplib.SMTP("smtp.mailtrap.io", 2525) as server:
server.login(login, password)
server.sendmail(
sender_email, receiver_email, message.as_string() )
print('Sent')
return
| [
1,
2,
3,
4,
5
] |
1,056 | 562b2c3567e42699cfd0804a5780af7ede142e13 | <mask token>
| <mask token>
def exec(bucket_id, project_id, reverse_opt):
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
blob_list = []
try:
for blob in blobs:
this_blob = {'name': blob.name, 'owner': blob.owner, 'class':
blob.storage_class, 'size': blob.size, 'date': str(blob.
updated).split('.')[0].split('+')[0]}
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=
reverse_opt)
report_table = PrettyTable()
report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']
report_table.align['NAME'] = 'l'
report_table.align['SIZE'] = 'r'
report_table.align['DATE'] = 'r'
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'],
str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
| from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
blob_list = []
try:
for blob in blobs:
this_blob = {'name': blob.name, 'owner': blob.owner, 'class':
blob.storage_class, 'size': blob.size, 'date': str(blob.
updated).split('.')[0].split('+')[0]}
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=
reverse_opt)
report_table = PrettyTable()
report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']
report_table.align['NAME'] = 'l'
report_table.align['SIZE'] = 'r'
report_table.align['DATE'] = 'r'
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'],
str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
| ## Filename: name.py
# Author: Marcelo Feitoza Parisi
#
# Description: Report the objects
# on the bucket sorted by name.
#
# ###########################
# # DISCLAIMER - IMPORTANT! #
# ###########################
#
# Stuff found here was built as a
# Proof-Of-Concept or Study material
# and should not be considered
# production ready!
#
# USE WITH CARE!
##
from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
# Google Cloud Storage Client
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
# Will hold our local list of objects
blob_list = []
try:
for blob in blobs:
# For each object we'll save name, owner, class, size and date
this_blob = { 'name': blob.name,
'owner': blob.owner,
'class': blob.storage_class,
'size' : blob.size,
'date' : str(blob.updated).split('.')[0].split('+')[0]
}
# Append object to our list
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
# Sort our object list by name using our reverse_opt
sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=reverse_opt)
# Generating our PrettyTable
report_table = PrettyTable()
report_table.field_names = ["NAME", "OWNER", "CLASS", "SIZE", "DATE"]
report_table.align["NAME"] = "l"
report_table.align["SIZE"] = "r"
report_table.align["DATE"] = "r"
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
| null | [
0,
1,
2,
3
] |
1,057 | 358879d83ed3058530031d50fb69e3ce11fbd524 | <mask token>
| print(60 * 60)
<mask token>
print(24 * seconds_per_hour)
<mask token>
print(seconds_per_day / seconds_per_hour)
print(seconds_per_day // seconds_per_hour)
| print(60 * 60)
seconds_per_hour = 60 * 60
print(24 * seconds_per_hour)
seconds_per_day = 24 * seconds_per_hour
print(seconds_per_day / seconds_per_hour)
print(seconds_per_day // seconds_per_hour)
| null | null | [
0,
1,
2
] |
1,058 | 5c1d81c973487f1b091e58a6ccf5947c3f2a7e6d | <mask token>
class TestTelegram(unittest.TestCase):
<mask token>
<mask token>
| <mask token>
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
<mask token>
| <mask token>
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
if __name__ == '__main__':
unittest.main()
| import unittest
from nldata.corpora import Telegram
import os
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
if __name__ == '__main__':
unittest.main()
| import unittest
from nldata.corpora import Telegram
import os
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
# telegram = Telegram(data_dir)
# it = telegram.split("train", n=20)
# samples = [s for s in it]
# self.assertEqual(len(samples), 20)
# list(map(print,samples))
if __name__ == '__main__':
unittest.main()
| [
1,
2,
3,
4,
5
] |
1,059 | 03629e62b11e66eeb0e111fee551c75c8463cbb8 | <mask token>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
<mask token>
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
| <mask token>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
<mask token>
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
| <mask token>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
| from compas.geometry import Line
from compas.geometry import Point
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
| from compas.geometry import Line
# This import is use to test __repr__.
from compas.geometry import Point # noqa: F401
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
| [
2,
3,
4,
5,
6
] |
1,060 | 9bb6fd6fbe212bdc29e2d1ec37fa6ec6ca9a9469 | <mask token>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
<mask token>
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
<mask token>
| <mask token>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
<mask token>
| <mask token>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == '__main__':
main3()
| import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == '__main__':
main3()
| #!/usr/bin/env python
# encoding: utf-8
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
# 阻塞父进程,直到子进程结束为止。
# 从实验来看,子进程结束和join的先后顺序无关。
# 唯一的限制是父进程需要等所有join的子进程结束后,才会继续向下执行。
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
# join接受一个timeout的参数,意思就是如果超过了timeout的时间,不管子进程是否结束,join函数也会直接返回。
d.join(1)
# 可以看到子进程d仍然未结束,但是父进程已经继续执行了。
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == "__main__":
# main1()
# main2()
main3()
| [
2,
5,
6,
7,
8
] |
1,061 | 267cb37f2ccad5b02a809d9b85327eacd9a49515 | <mask token>
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
<mask token>
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
<mask token>
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
    def batti_update():
        # Power ("batti") status read from a public ThingSpeak IoT channel;
        # returns only the latest feed entry (results=1 in the query string).
        # NOTE(review): the API key is embedded in the URL — consider moving
        # it to configuration.
        url = (
            'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
            )
        response = requests.get(url)
        todos = json.loads(response.text)
        # Latest (and only) feed entry; raises IndexError if the channel is empty.
        feeds = todos['feeds'][0]
        response2 = 'Batti Status Now :' + str(feeds['field1'] +
            '\n Last Updated: ' + str(feeds['created_at']))
        print(response2)
        # Plain-text fulfillment (no rich Facebook payload for this intent).
        reply = {'fulfillmentText': response2}
        return reply
    def default():
        # Fallback handler for intent names with no registered handler.
        return 'Incorrect Data'
    # Dispatch table: Dialogflow intent display-name -> nested handler above.
    switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
        news_nepal_int, 'i need help main int - yes': i_need_help_yes,
        'faq-que-ans-int': faq_ques_ans,
        'bloodpal-need-blood-main-int - yes': blood_pal_yes,
        'data world int': world_data_live, 'district data int':
        district_all_summary, 'province data int': province_all_summary,
        'province-wise-data': proviencewise_detail,
        'dis-vdc data detail int': dis_vdc_detail,
        'bloodpal-become-donor-main-int': blood_pal_donor_yes,
        'batti-update-intent': batti_update}

    def switch(intentname):
        # Invoke the handler matching the intent (default() if unknown).
        return switcher.get(intentname, default)()
    reply = switch(intent)
    return jsonify(reply)
<mask token>
| <mask token>
@app.route('/')
def hello():
    """Health-check endpoint confirming the Flask app is running."""
    return 'Flask setup'
def sheets_row_writer(data_list):
    """Append one row to worksheet index 1 of the
    'corona-help-resource-management' Google Sheet.

    data_list: list of cell values for the new row (order defined by caller).
    Requires the service-account key file to be present alongside the app.
    """
    print('sheets method invoked')
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'mechnepal-test-54c4387178d9.json', scope)
    client = gspread.authorize(credentials)
    sh = client.open('corona-help-resource-management')
    # Worksheet index 1 = second sheet (help/blood-request log).
    worksheet = sh.get_worksheet(1)
    worksheet.append_row(data_list)
    print('Write complete')
def sheets_row_writer_donor(data_list_donor):
    """Append one donor-registration row to worksheet index 2 of the
    'corona-help-resource-management' Google Sheet.

    Same flow as sheets_row_writer, but targets the donor sheet.
    """
    print('donor sheets method invoked')
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'mechnepal-test-54c4387178d9.json', scope)
    client = gspread.authorize(credentials)
    sh = client.open('corona-help-resource-management')
    # Worksheet index 2 = third sheet (blood-donor registry).
    worksheet = sh.get_worksheet(2)
    worksheet.append_row(data_list_donor)
    print('Write complete')
<mask token>
def death_global_api():
    """Return worldwide COVID-19 totals as a Dialogflow fulfillment reply.

    Fix: the original read the request body into an unused variable and
    issued a second worldometers HTTP GET whose result was discarded —
    ``death_global()`` performs its own scrape, so the duplicate request
    only added latency. Both dead statements are removed; the returned
    payload is unchanged.
    """
    response = death_global()
    reply = {'fulfillmentText': response}
    return jsonify(reply)
def death_country(id):
    """Scrape worldometers.info for one country's COVID-19 totals.

    id: country slug as used in the worldometers URL (e.g. 'us', 'nepal').
    Returns a single English sentence with total, dead, recovered and
    active case counts (raw scraped text, including its own spacing).
    """
    country = id.upper()
    page = requests.get(
        'https://www.worldometers.info/coronavirus/country/' + id + '/')
    soup = BeautifulSoup(page.content, 'html.parser')
    # The three main counters appear in page order: total, deaths, recovered.
    cases_list = [node.text for node in
                  soup.find_all('div', {'class': 'maincounter-number'})]
    active_cases = soup.find('div', {'class': 'number-table-main'}).text
    return ('In ' + country + ' There are' + cases_list[0] +
            'Total cases out of which' + cases_list[1] + 'are dead and' +
            cases_list[2] + 'have already recovered . There are still ' +
            active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
    """Main Dialogflow webhook endpoint.

    Parses the fulfillment request, reads the detected intent's display
    name, and dispatches to one of the nested handlers below via the
    `switcher` table. Each handler returns a dict shaped as a Dialogflow
    fulfillment response (mostly Facebook Messenger rich messages).
    """
    data = request.get_json(silent=True)
    intent = data['queryResult']['intent']['displayName']
    print(intent)

    def news_nepal_int():
        # Latest three Nepali COVID news items rendered as Facebook cards.
        url = 'https://nepalcorona.info/api/v1/news'
        response = requests.get(url)
        news = json.loads(response.text)
        data = news['data']
        data1 = data[0]
        data2 = data[1]
        data3 = data[2]
        response2 = [{'card': {'title': data1['title'], 'subtitle':
            'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
            'image_url'], 'buttons': [{'text': 'Read Full Story',
            'postback': data1['url']}, {'text': 'Corona Symptoms',
            'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
            'title': data2['title'], 'subtitle': 'Source ' + data2['source'
            ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
            'Read Full Story', 'postback': data2['url']}, {'text':
            'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
            'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
            'Source ' + data3['source'] + ' >>', 'imageUri': data3[
            'image_url'], 'buttons': [{'text': 'Read Full Story',
            'postback': data3['url']}, {'text': 'Self Isolation',
            'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
            'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response2}
        return reply

    def i_need_help_yes():
        # Log a help request (item/name/phone/place) to Google Sheets and
        # confirm back to the user. Dialogflow list parameters — only the
        # first element of each is used.
        name = data['queryResult']['parameters']['name-people']
        place = data['queryResult']['parameters']['name-place']
        item_required = data['queryResult']['parameters']['help-ent']
        phone = data['queryResult']['parameters']['phone-number']
        ilist = [item_required[0], name[0], phone[0], place[0]]
        sheets_row_writer(ilist)
        response2 = 'Hello ' + name[0
            ] + ' so you are looking for ' + item_required[0
            ] + ' Your location is ' + place[0
            ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
        response = [{'quickReplies': {'title': response2, 'quickReplies': [
            'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
            }, {'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def faq_ques_ans():
        # Send one random FAQ; language (English/Nepali) is chosen from the
        # quick-reply text the user tapped.
        ff = data['originalDetectIntentRequest']['payload']['data']['message'][
            'text']
        url = 'https://nepalcorona.info/api/v1/faqs'
        response = requests.get(url)
        todos = json.loads(response.text)
        # NOTE(review): assumes the FAQ API always returns at least 46
        # entries — confirm, otherwise faqs[rand] can raise IndexError.
        rand = random.randrange(0, 45, 1)
        opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
            'Preventions', 'Self Isolation', 'Play Corona Quiz']
        faqs = todos['data']
        faq = faqs[rand]
        if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
            'भाषा परिवर्तन'):
            randq = faq['question']
            randa = faq['answer']
            opt1 = 'More Quizzles'
            opt2 = 'Switch Language'
        else:
            randq = faq['question_np']
            randa = faq['answer_np']
            opt1 = 'अरु देखाउनुहोस >>'
            opt2 = 'भाषा परिवर्तन'
        # NOTE(review): response2 is built but never used below.
        response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
        response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
            'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
            randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
            'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def blood_pal_yes():
        # Log a blood request to Google Sheets and reply with a confirmation
        # plus the regional BloodPal contact numbers.
        print(intent)
        print(data)
        blood_group = data['queryResult']['parameters']['blood-group']
        blood_amount = data['queryResult']['parameters']['blood-pint']
        location = data['queryResult']['parameters']['blood-location']
        case = data['queryResult']['parameters']['blood-case']
        date = data['queryResult']['parameters']['blood-date']
        phone = data['queryResult']['parameters']['blood-number']
        ilist = [blood_group, blood_amount, location, case, date, phone]
        sheets_row_writer(ilist)
        response3 = """For critical case, please contact
 Kathmandu 9880998523
 Bhaktapur 9880998525
 Kavre 9869294490
 Purwanchal 9862176689
 Chitwan 9801070746
 Butwal 9807522664
 Dang 9801920169
 Stay connected with BloodPal!"""
        response = (
            'The following request has been sent. We will contact you shortly. '
            + blood_group + ' blood (' + str(blood_amount) +
            ' ) required for ' + case + ' at ' + location + ' On ' + date +
            ' - ' + phone + ' Thank you .')
        response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
            {'text': {'text': ['Dummy text']}}, {'text': {'text': [
            response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
            'Dummy text']}}]
        reply = {'fulfillmentMessages': response2}
        return reply

    def blood_pal_donor_yes():
        # Register a new blood donor: collect profile fields and append them
        # to the donor worksheet.
        print(intent)
        print(data)
        permananet_address = data['queryResult']['parameters'][
            'permananet-address']
        height = data['queryResult']['parameters']['height']
        gender = data['queryResult']['parameters']['gender']
        age = data['queryResult']['parameters']['age']
        blood = data['queryResult']['parameters']['blood']
        current_address = data['queryResult']['parameters']['current-address']
        email = data['queryResult']['parameters']['email']
        name = data['queryResult']['parameters']['name']
        last_donation = data['queryResult']['parameters']['last-donation']
        weight = data['queryResult']['parameters']['weight']
        number = data['queryResult']['parameters']['number']
        ilist = [name, number, email, current_address, permananet_address,
            age, height, weight, gender, blood, last_donation]
        sheets_row_writer_donor(ilist)
        response3 = """For critical case, please contact
 Kathmandu 9880998523
 Bhaktapur 9880998525
 Kavre 9869294490
 Purwanchal 9862176689
 Chitwan 9801070746
 Butwal 9807522664
 Dang 9801920169
 Stay connected with BloodPal!"""
        response = ('Thank you ' + name +
            ' for registration as a blood donor We will contact you at the time of urgency in your area.'
            )
        response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
            {'text': {'text': ['Dummy text']}}, {'text': {'text': [
            response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
            'Dummy text']}}]
        reply = {'fulfillmentMessages': response2}
        return reply

    def world_data_live():
        # Global totals via the worldometers scraper (death_global).
        text = death_global()
        response = [{'quickReplies': {'title': text, 'quickReplies': [
            'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
            "Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
            'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def district_all_summary():
        # All-district summary text from the data-services module.
        text = dss.district_all_summary()
        response = [{'quickReplies': {'title': text, 'quickReplies': [
            'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
            "Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
            'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def province_all_summary():
        # All-province summary text from the data-services module.
        text = dss.provience_all_summary()
        print(text)
        response = [{'quickReplies': {'title': text, 'quickReplies': [
            'District-Summary', 'Province-Data', 'World Data',
            'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
            'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def proviencewise_detail():
        # Card with details for one province; the province number arrives as
        # the 'custom-province-ent' Dialogflow parameter.
        pcode = data['queryResult']['parameters']['custom-province-ent']
        province = int(pcode)
        print(type(province))
        response_summary = dss.ardp(province)
        print(response_summary)
        response = [{'card': {'title': 'Covid-19 Provience: ' + str(
            province) + ' | Details', 'subtitle': response_summary,
            'imageUri':
            'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
            'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
            'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
            province) + ' Vdc-Mun Data', 'postback':
            'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
            'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
            'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def dis_vdc_detail():
        # District- or VDC/municipality-level drill-down for a chosen code;
        # anything other than 'vdc' falls back to district granularity.
        cod = data['queryResult']['parameters']['custom-province-ent']
        dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
        print(type(dvdc))
        print(dvdc)
        code = int(cod)
        print(type(code))
        if dvdc == 'vdc':
            print('inside vdc')
            typ = 'vdc'
        else:
            print('inside district')
            typ = 'district'
        data_return = dss.ard(code, typ)
        response = [{'quickReplies': {'title': data_return, 'quickReplies':
            ['District Summary', 'Province Summary', 'Nepali News',
            'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
            'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def nepal_data_new_main_int():
        # Nepal-wide totals plus an affected-area summary card.
        url = 'https://nepalcorona.info/api/v1/data/nepal'
        response = requests.get(url)
        todos = json.loads(response.text)
        # NOTE(review): covid_df is unused below — confirm whether
        # dss.create_covid_df() has a needed side effect.
        covid_df = dss.create_covid_df()
        response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
            ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
            todos['deaths']) + ' ' + '\n'
        print(response2)
        response_summary = dss.affected_summary()
        response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
            {'text': {'text': ['']}}, {'card': {'title':
            'Covid-19 Nepal | Stats', 'subtitle': response_summary,
            'imageUri':
            'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
            , 'buttons': [{'text': 'Province Summary', 'postback':
            'province data int'}, {'text': 'District-Summary', 'postback':
            'district data int'}, {'text': 'Latest Nepali News', 'postback':
            'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
            ['Dummy text']}}]
        reply = {'fulfillmentMessages': response}
        return reply

    def batti_update():
        # Power ("batti") status from a ThingSpeak IoT channel (latest entry).
        url = (
            'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
            )
        response = requests.get(url)
        todos = json.loads(response.text)
        feeds = todos['feeds'][0]
        response2 = 'Batti Status Now :' + str(feeds['field1'] +
            '\n Last Updated: ' + str(feeds['created_at']))
        print(response2)
        reply = {'fulfillmentText': response2}
        return reply

    def default():
        # Fallback handler for unrecognized intent names.
        return 'Incorrect Data'
    # Dispatch table: Dialogflow intent display-name -> nested handler above.
    switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
        news_nepal_int, 'i need help main int - yes': i_need_help_yes,
        'faq-que-ans-int': faq_ques_ans,
        'bloodpal-need-blood-main-int - yes': blood_pal_yes,
        'data world int': world_data_live, 'district data int':
        district_all_summary, 'province data int': province_all_summary,
        'province-wise-data': proviencewise_detail,
        'dis-vdc data detail int': dis_vdc_detail,
        'bloodpal-become-donor-main-int': blood_pal_donor_yes,
        'batti-update-intent': batti_update}

    def switch(intentname):
        # Invoke the matching handler (default() if unknown).
        return switcher.get(intentname, default)()
    reply = switch(intent)
    return jsonify(reply)
<mask token>
| <mask token>
# OAuth scopes required by gspread for Google Sheets + Drive access.
scope = ['https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive']
# Flask application instance serving the chatbot webhook endpoints.
app = Flask(__name__)
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
def death_global():
    """Scrape worldometers.info and phrase the worldwide COVID-19 totals.

    Returns one English sentence with total, dead, recovered and active
    case counts (raw scraped counter text, including its own spacing).
    """
    html = requests.get('https://www.worldometers.info/coronavirus/')
    parsed = BeautifulSoup(html.content, 'html.parser')
    # Main counters appear in page order: total cases, deaths, recovered.
    cases_list = [node.text for node in
                  parsed.find_all('div', {'class': 'maincounter-number'})]
    active_cases = parsed.find('div', {'class': 'number-table-main'}).text
    return ('There are' + cases_list[0]
            + ' Total cases out of which' + cases_list[1]
            + ' have died and' + cases_list[2]
            + ' have recovered . There are still ' + active_cases +
            ' active cases.')
@app.route('/death/global', methods=['POST'])
def death_global_api():
    """POST /death/global: worldwide COVID-19 totals as Dialogflow fulfillment.

    Bug fix: the original line was ``app.route('/death/global', ...)`` — a
    bare call with the ``@`` missing, so the view function was never
    registered with Flask and the URL returned 404. Also removed the unused
    request-body read and the duplicate worldometers fetch whose result was
    discarded (``death_global()`` performs its own scrape).
    """
    response = death_global()
    reply = {'fulfillmentText': response}
    return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get(
'https://www.worldometers.info/coronavirus/country/' + id + '/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return ('In ' + idu + ' There are' + cases_list[0] +
'Total cases out of which' + cases_list[1] + 'are dead and' +
cases_list[2] + 'have already recovered . There are still ' +
active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
    # Start the Flask development server when executed as a script.
    app.run()
| from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
app = Flask(__name__)
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
def death_global():
page = requests.get('https://www.worldometers.info/coronavirus/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
cases_list = []
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return 'There are' + cases_list[0
] + ' Total cases out of which' + cases_list[1
] + ' have died and' + cases_list[2
] + ' have recovered . There are still ' + active_cases + ' active cases.'
app.route('/death/global', methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get(
'https://www.worldometers.info/coronavirus/country/' + id + '/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return ('In ' + idu + ' There are' + cases_list[0] +
'Total cases out of which' + cases_list[1] + 'are dead and' +
cases_list[2] + 'have already recovered . There are still ' +
active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
| from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
# page = requests.get("https://www.worldometers.info/coronavirus/")
# soup = BeautifulSoup(page.content, 'html.parser')
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
# Initialize application
app = Flask(__name__)
@app.route("/")
def hello():
return "Flask setup"
def sheets_row_writer(data_list):
print("sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list)
print("Write complete")
def sheets_row_writer_donor(data_list_donor):
print("donor sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list_donor)
print("Write complete")
def death_global():
page = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
cases_list = []
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return "There are"+cases_list[0]+" Total cases out of which"+cases_list[1]+" have died and"+cases_list[2]+" have recovered . There are still "+active_cases+" active cases."
app.route("/death/global", methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get("https://www.worldometers.info/coronavirus/")
response = death_global()
reply = { "fulfillmentText": response }
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get("https://www.worldometers.info/coronavirus/country/"+id+"/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return "In " +idu+" There are"+cases_list[0]+"Total cases out of which"+cases_list[1]+"are dead and"+cases_list[2]+"have already recovered . There are still "+active_cases+ " active cases ."
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print (intent)
def news_nepal_int():
url = "https://nepalcorona.info/api/v1/news"
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{
"card":{
"title":data1['title'],
"subtitle":"Source: "+data1['source']+" >>",
"imageUri":data1['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data1['url']
},
{
"text":"Corona Symptoms",
"postback":"symptoms"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data2['title'],
"subtitle":"Source "+data2['source']+" >>",
"imageUri":data2['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data2['url']
},
{
"text":"Live Nepal Data",
"postback":"live-nepal-data"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data3['title'],
"subtitle":"Source "+data3['source']+" >>",
"imageUri":data3['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data3['url']
},
{
"text":"Self Isolation",
"postback":"self isolation"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response2 }
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0],name[0],phone[0],place[0]]
sheets_row_writer(ilist)
response2 = "Hello "+name[0]+" so you are looking for "+item_required[0]+" Your location is "+place[0]+" One of our Team will contact you @ " +phone[0]+" soon !"
response = [
{
"quickReplies": {
"title": response2,
"quickReplies": [
"Call a Doctor",
"Get Online Support"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']
url = "https://nepalcorona.info/api/v1/faqs"
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ["Live Nepali Data","Latest Nepali News","Symptoms","Preventions","Self Isolation","Play Corona Quiz"]
faqs = todos['data']
faq = faqs[rand]
if(ff=="English FAQ" or ff =="More Quizzles" or ff =="भाषा परिवर्तन"):
randq= faq['question']
randa = faq['answer']
opt1 = "More Quizzles"
opt2 = "Switch Language"
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = "अरु देखाउनुहोस >>"
opt2 = "भाषा परिवर्तन"
response2 = "Q. "+randq+"\n A. "+randa+"\n"
response = [{
"text": {
"text": [
randq
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"quickReplies": {
"title": randa,
"quickReplies": [
opt1,
opt2,
random.choice(opt3)
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def blood_pal_yes():
print (intent)
print (data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group,blood_amount,location,case,date,phone]
sheets_row_writer(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "The following request has been sent. We will contact you shortly. "+blood_group+" blood ("+str(blood_amount)+" ) required for "+case+" at "+location+" On "+date+" - "+phone+" Thank you ."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def blood_pal_donor_yes():
print (intent)
print (data)
permananet_address = data['queryResult']['parameters']['permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation= data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]
sheets_row_writer_donor(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "Thank you "+name+" for registration as a blood donor We will contact you at the time of urgency in your area."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def world_data_live():
text = death_global()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Data",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#district summary all
def district_all_summary():
text = dss.district_all_summary()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Summary",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#provience summary all should remove
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"District-Summary",
"Province-Data",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def proviencewise_detail():
#get provience name
#return dss.ard(provience)
#card
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [
{
"card":{
"title": "Covid-19 Provience: "+str(province)+" | Details",
"subtitle":response_summary,
"imageUri": "https://setopati.net/wp-content/uploads/2018/02/province6.jpg",
"buttons":[
{
"text":"Prov "+str(province)+" District Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Prov "+str(province)+" Vdc-Mun Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
# provincecode = pcode
if(dvdc=="vdc"):
print('inside vdc')
typ = "vdc"
else:
print('inside district')
typ = "district"
data_return = dss.ard(code,typ)
response = [
{
"quickReplies": {
"title": data_return,
"quickReplies": [
"District Summary",
"Province Summary",
"Nepali News",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def nepal_data_new_main_int():
url = "https://nepalcorona.info/api/v1/data/nepal"
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = "Nepal Cases \n Positive :"+str(todos["tested_positive"])+" | Recovered: "+str(todos["recovered"])+"| Deaths:"+str(todos["deaths"])+" "+"\n"
print(response2)
response_summary = dss.affected_summary()
response = [
{
"text": {
"text": [
response2
]
},
"platform": "FACEBOOK"
},
{
"text": {
"text": [
""
]
}
},
{
"card":{
"title": "Covid-19 Nepal | Stats",
"subtitle":response_summary,
# "subtitle": "Find details by Province, Municipals and Districts for Nepal",
"imageUri": "https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png",
"buttons":[
{
"text":"Province Summary",
"postback":"province data int"
},
{
"text":"District-Summary",
"postback":"district data int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def batti_update():
url = "https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM"
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos["feeds"][0]
response2 = "Batti Status Now :"+str(feeds["field1"]+"\n Last Updated: "+str(feeds["created_at"]))
print(response2)
reply = { "fulfillmentText": response2 }
return reply
def default():
return "Incorrect Data"
switcher = {
"nepal data int": nepal_data_new_main_int,
"news-nepal-int": news_nepal_int,
"i need help main int - yes": i_need_help_yes,
"faq-que-ans-int": faq_ques_ans,
"bloodpal-need-blood-main-int - yes": blood_pal_yes,
"data world int": world_data_live,
"district data int": district_all_summary,
"province data int": province_all_summary,
"province-wise-data": proviencewise_detail,
"dis-vdc data detail int": dis_vdc_detail,
"bloodpal-become-donor-main-int":blood_pal_donor_yes,
"batti-update-intent":batti_update
}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
| [
5,
6,
9,
10,
11
] |
1,062 | 0555c577a8fb746cf2debb929d02b46cd3be4d7b | <mask token>
| <mask token>
def uppercase_first_letter(string: str) ->str:
return string[0:1].upper() + string[1:]
<mask token>
| <mask token>
def uppercase_first_letter(string: str) ->str:
return string[0:1].upper() + string[1:]
string_list: List[str] = input('Please, input string: ').split(' ')
result: str = ''
for i, value in enumerate(string_list):
result += (lambda index: '' if index == 0 else ' ')(i
) + uppercase_first_letter(value)
print(result)
| from typing import List
def uppercase_first_letter(string: str) ->str:
return string[0:1].upper() + string[1:]
string_list: List[str] = input('Please, input string: ').split(' ')
result: str = ''
for i, value in enumerate(string_list):
result += (lambda index: '' if index == 0 else ' ')(i
) + uppercase_first_letter(value)
print(result)
| null | [
0,
1,
2,
3
] |
1,063 | 8355faf7c0d3742be34a56ddc982cb389c80d0a9 | <mask token>
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(
self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
first_model_manager = ModelManager()
second_model_manager = ModelManager()
first_model_manager.load_model('tests.mocks.MLModelMock')
first_model_object = first_model_manager.get_model(qualified_name=
'qualified_name')
second_model_object = second_model_manager.get_model(qualified_name
='qualified_name')
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model_object = None
try:
model_object = model_manager.get_model(qualified_name=
'qualified_name')
except Exception as e:
exception_raised = True
print_tb(e)
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
<mask token>
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
model_manager = ModelManager()
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.SomeClass')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'ModelManager instance can only hold references to objects of type MLModel.'
)
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.MLModelMock')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'A model with the same qualified name is already in the ModelManager singleton.'
)
def test_remove_model_method(self):
"""Testing the remove_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised1 = False
try:
model_manager.remove_model(qualified_name='qualified_name')
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ''
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 ==
"Instance of model 'qualified_name' not found in ModelManager.")
<mask token>
<mask token>
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
model_metadata = model_manager.get_model_metadata(qualified_name=
'qualified_name')
self.assertTrue(model_metadata['display_name'] == 'display_name')
self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')
self.assertTrue(model_metadata['description'] == 'description')
self.assertTrue(model_metadata['version'] == '1.0.0')
self.assertTrue(type(model_metadata['input_schema']) is dict)
self.assertTrue(type(model_metadata['output_schema']) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name
='asdf')
except Exception as e:
excpeption_raised = True
exception_message = str(e)
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
<mask token>
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
model = None
try:
model = model_manager.get_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
<mask token>
| <mask token>
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(
self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
first_model_manager = ModelManager()
second_model_manager = ModelManager()
first_model_manager.load_model('tests.mocks.MLModelMock')
first_model_object = first_model_manager.get_model(qualified_name=
'qualified_name')
second_model_object = second_model_manager.get_model(qualified_name
='qualified_name')
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model_object = None
try:
model_object = model_manager.get_model(qualified_name=
'qualified_name')
except Exception as e:
exception_raised = True
print_tb(e)
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
def test_load_model_method_with_wrong_class_path(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
exception_raised = False
exception_message = None
try:
model_manager.load_model('sdf.sdf.sdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "No module named 'sdf'")
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
model_manager = ModelManager()
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.SomeClass')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'ModelManager instance can only hold references to objects of type MLModel.'
)
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.MLModelMock')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'A model with the same qualified name is already in the ModelManager singleton.'
)
def test_remove_model_method(self):
"""Testing the remove_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised1 = False
try:
model_manager.remove_model(qualified_name='qualified_name')
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ''
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 ==
"Instance of model 'qualified_name' not found in ModelManager.")
def test_remove_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when removing a model that is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.remove_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_models_method(self):
"""Testing get_models method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
models = model_manager.get_models()
self.assertTrue(models[0]['display_name'] == 'display_name')
self.assertTrue(models[0]['qualified_name'] == 'qualified_name')
self.assertTrue(models[0]['description'] == 'description')
self.assertTrue(models[0]['version'] == '1.0.0')
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
model_metadata = model_manager.get_model_metadata(qualified_name=
'qualified_name')
self.assertTrue(model_metadata['display_name'] == 'display_name')
self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')
self.assertTrue(model_metadata['description'] == 'description')
self.assertTrue(model_metadata['version'] == '1.0.0')
self.assertTrue(type(model_metadata['input_schema']) is dict)
self.assertTrue(type(model_metadata['output_schema']) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name
='asdf')
except Exception as e:
excpeption_raised = True
exception_message = str(e)
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_model_method(self):
"""Testing the get_model method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model = None
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised = True
self.assertFalse(exception_raised)
self.assertTrue(type(model) is MLModelMock)
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
model = None
try:
model = model_manager.get_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
<mask token>
| <mask token>
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(
self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
first_model_manager = ModelManager()
second_model_manager = ModelManager()
first_model_manager.load_model('tests.mocks.MLModelMock')
first_model_object = first_model_manager.get_model(qualified_name=
'qualified_name')
second_model_object = second_model_manager.get_model(qualified_name
='qualified_name')
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model_object = None
try:
model_object = model_manager.get_model(qualified_name=
'qualified_name')
except Exception as e:
exception_raised = True
print_tb(e)
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
def test_load_model_method_with_wrong_class_path(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
exception_raised = False
exception_message = None
try:
model_manager.load_model('sdf.sdf.sdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "No module named 'sdf'")
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
model_manager = ModelManager()
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.SomeClass')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'ModelManager instance can only hold references to objects of type MLModel.'
)
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.MLModelMock')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'A model with the same qualified name is already in the ModelManager singleton.'
)
def test_remove_model_method(self):
"""Testing the remove_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised1 = False
try:
model_manager.remove_model(qualified_name='qualified_name')
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ''
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 ==
"Instance of model 'qualified_name' not found in ModelManager.")
def test_remove_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when removing a model that is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.remove_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_models_method(self):
"""Testing get_models method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
models = model_manager.get_models()
self.assertTrue(models[0]['display_name'] == 'display_name')
self.assertTrue(models[0]['qualified_name'] == 'qualified_name')
self.assertTrue(models[0]['description'] == 'description')
self.assertTrue(models[0]['version'] == '1.0.0')
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
model_metadata = model_manager.get_model_metadata(qualified_name=
'qualified_name')
self.assertTrue(model_metadata['display_name'] == 'display_name')
self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')
self.assertTrue(model_metadata['description'] == 'description')
self.assertTrue(model_metadata['version'] == '1.0.0')
self.assertTrue(type(model_metadata['input_schema']) is dict)
self.assertTrue(type(model_metadata['output_schema']) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name
='asdf')
except Exception as e:
excpeption_raised = True
exception_message = str(e)
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_model_method(self):
"""Testing the get_model method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model = None
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised = True
self.assertFalse(exception_raised)
self.assertTrue(type(model) is MLModelMock)
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
model = None
try:
model = model_manager.get_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
if __name__ == '__main__':
unittest.main()
| import unittest
from traceback import print_tb
from ml_base.utilities.model_manager import ModelManager
from tests.mocks import MLModelMock
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(
self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
first_model_manager = ModelManager()
second_model_manager = ModelManager()
first_model_manager.load_model('tests.mocks.MLModelMock')
first_model_object = first_model_manager.get_model(qualified_name=
'qualified_name')
second_model_object = second_model_manager.get_model(qualified_name
='qualified_name')
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model_object = None
try:
model_object = model_manager.get_model(qualified_name=
'qualified_name')
except Exception as e:
exception_raised = True
print_tb(e)
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
def test_load_model_method_with_wrong_class_path(self):
"""Testing the load_model() method."""
model_manager = ModelManager()
exception_raised = False
exception_message = None
try:
model_manager.load_model('sdf.sdf.sdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "No module named 'sdf'")
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
model_manager = ModelManager()
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.SomeClass')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'ModelManager instance can only hold references to objects of type MLModel.'
)
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.load_model('tests.mocks.MLModelMock')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
'A model with the same qualified name is already in the ModelManager singleton.'
)
def test_remove_model_method(self):
"""Testing the remove_model() method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised1 = False
try:
model_manager.remove_model(qualified_name='qualified_name')
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ''
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 ==
"Instance of model 'qualified_name' not found in ModelManager.")
def test_remove_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when removing a model that is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
try:
model_manager.remove_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_models_method(self):
"""Testing get_models method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
models = model_manager.get_models()
self.assertTrue(models[0]['display_name'] == 'display_name')
self.assertTrue(models[0]['qualified_name'] == 'qualified_name')
self.assertTrue(models[0]['description'] == 'description')
self.assertTrue(models[0]['version'] == '1.0.0')
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
model_metadata = model_manager.get_model_metadata(qualified_name=
'qualified_name')
self.assertTrue(model_metadata['display_name'] == 'display_name')
self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')
self.assertTrue(model_metadata['description'] == 'description')
self.assertTrue(model_metadata['version'] == '1.0.0')
self.assertTrue(type(model_metadata['input_schema']) is dict)
self.assertTrue(type(model_metadata['output_schema']) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name
='asdf')
except Exception as e:
excpeption_raised = True
exception_message = str(e)
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
def test_get_model_method(self):
"""Testing the get_model method."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
model = None
try:
model = model_manager.get_model(qualified_name='qualified_name')
except Exception as e:
exception_raised = True
self.assertFalse(exception_raised)
self.assertTrue(type(model) is MLModelMock)
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
model_manager = ModelManager()
model_manager.load_model('tests.mocks.MLModelMock')
exception_raised = False
exception_message = ''
model = None
try:
model = model_manager.get_model(qualified_name='asdf')
except Exception as e:
exception_raised = True
exception_message = str(e)
self.assertTrue(exception_raised)
self.assertTrue(exception_message ==
"Instance of model 'asdf' not found in ModelManager.")
if __name__ == '__main__':
unittest.main()
| import unittest
from traceback import print_tb
from ml_base.utilities.model_manager import ModelManager
from tests.mocks import MLModelMock
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
# arrange, act
# instantiating the model manager class twice
first_model_manager = ModelManager()
second_model_manager = ModelManager()
# loading the MLModel objects from configuration
first_model_manager.load_model("tests.mocks.MLModelMock")
first_model_object = first_model_manager.get_model(qualified_name="qualified_name")
second_model_object = second_model_manager.get_model(qualified_name="qualified_name")
# assert
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# adding the model
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
model_object = None
# accessing the MLModelMock model object
try:
model_object = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised = True
print_tb(e)
# assert
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
def test_load_model_method_with_wrong_class_path(self):
"""Testing the load_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# act
# adding the model
exception_raised = False
exception_message = None
# accessing the MLModelMock model object
try:
model_manager.load_model("sdf.sdf.sdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "No module named 'sdf'")
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
# arrange
model_manager = ModelManager()
# act
exception_raised = False
exception_message = ""
try:
model_manager.load_model("tests.mocks.SomeClass")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "ModelManager instance can only hold references to objects of type MLModel.")
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
# arrange
model_manager = ModelManager()
# act
# loading the first instance of the model object
model_manager.load_model("tests.mocks.MLModelMock")
exception_raised = False
exception_message = ""
try:
# loading it again
model_manager.load_model("tests.mocks.MLModelMock")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "A model with the same qualified name is already in the ModelManager singleton.")
def test_remove_model_method(self):
"""Testing the remove_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# adding the model
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised1 = False
# accessing the MLModelMock model object
try:
model_manager.remove_model(qualified_name="qualified_name")
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ""
# trying to access the model that was removed
try:
model = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
# assert
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 == "Instance of model 'qualified_name' not found in ModelManager.")
def test_remove_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when removing a model that is not found."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
exception_message = ""
try:
model_manager.remove_model(qualified_name="asdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
def test_get_models_method(self):
"""Testing get_models method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
models = model_manager.get_models()
# assert
self.assertTrue(models[0]["display_name"] == "display_name")
self.assertTrue(models[0]["qualified_name"] == "qualified_name")
self.assertTrue(models[0]["description"] == "description")
self.assertTrue(models[0]["version"] == "1.0.0")
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
model_metadata = model_manager.get_model_metadata(qualified_name="qualified_name")
# assert
self.assertTrue(model_metadata["display_name"] == "display_name")
self.assertTrue(model_metadata["qualified_name"] == "qualified_name")
self.assertTrue(model_metadata["description"] == "description")
self.assertTrue(model_metadata["version"] == "1.0.0")
self.assertTrue(type(model_metadata["input_schema"]) is dict)
self.assertTrue(type(model_metadata["output_schema"]) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name="asdf")
except Exception as e:
excpeption_raised = True
exception_message = str(e)
# assert
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
def test_get_model_method(self):
"""Testing the get_model method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
model = None
try:
model = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised = True
# assert
self.assertFalse(exception_raised)
self.assertTrue(type(model) is MLModelMock)
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
exception_message = ""
model = None
try:
model = model_manager.get_model(qualified_name="asdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
if __name__ == '__main__':
unittest.main()
| [
9,
13,
14,
15,
16
] |
1,064 | 8ec18e259af1123fad7563aee3a363e095e30e8e | <mask token>
| <mask token>
class Song(models.Model):
<mask token>
<mask token>
<mask token>
def __unicode__(self):
return self.name
| <mask token>
class Song(models.Model):
name = models.CharField(max_length=255)
filename = models.FileField(upload_to='canciones/')
album = models.ForeignKey(Albums)
def __unicode__(self):
return self.name
| from django.db import models
from albums.models import Albums
class Song(models.Model):
name = models.CharField(max_length=255)
filename = models.FileField(upload_to='canciones/')
album = models.ForeignKey(Albums)
def __unicode__(self):
return self.name
| from django.db import models
from albums.models import Albums
class Song(models.Model):
name = models.CharField(max_length=255)
filename = models.FileField(upload_to='canciones/')
album = models.ForeignKey(Albums)
def __unicode__(self,):
return self.name
| [
0,
2,
3,
4,
5
] |
1,065 | f7d3096d669946e13186a893ffc53067e0fd0a0a | <mask token>
| # -*- coding: utf-8 -*-
"""Digital Forensics Virtual File System (dfVFS).
dfVFS, or Digital Forensics Virtual File System, is a Python module
that provides read-only access to file-system objects from various
storage media types and file formats.
"""
| null | null | null | [
0,
1
] |
1,066 | 84980b8923fa25664833f810a906d27531145141 | <mask token>
def pdf_to_png(filename):
doc = fitz.open('pdf_files\\{}'.format(filename))
zoom = 4
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\\{}'.format(new_filename))
return new_filename
def create_learn_base(filename, language, i):
img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(
filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
img_to_crop = Image.open('photo_files\\{}'.format(filename))
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines():
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,
0, 255), 1)
area = x, height - h, w, height - y
cropped_img = img_to_crop.crop(area)
try:
if not os.path.exists('learn_data\\s_{}'.format(box[0])):
os.mkdir('learn_data\\s_{}'.format(box[0]))
cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0],
box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/' + the_dir):
try:
Image.open('learn_data/' + the_dir + '/' + the_file)
except OSError:
os.remove('learn_data/' + the_dir + '/' + the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
<mask token>
| <mask token>
def pdf_to_png(filename):
doc = fitz.open('pdf_files\\{}'.format(filename))
zoom = 4
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\\{}'.format(new_filename))
return new_filename
def create_learn_base(filename, language, i):
img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(
filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
img_to_crop = Image.open('photo_files\\{}'.format(filename))
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines():
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,
0, 255), 1)
area = x, height - h, w, height - y
cropped_img = img_to_crop.crop(area)
try:
if not os.path.exists('learn_data\\s_{}'.format(box[0])):
os.mkdir('learn_data\\s_{}'.format(box[0]))
cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0],
box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/' + the_dir):
try:
Image.open('learn_data/' + the_dir + '/' + the_file)
except OSError:
os.remove('learn_data/' + the_dir + '/' + the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
clear_directory('learn_data')
for the_file in os.listdir('pdf_files'):
filename = the_file
png_filename = pdf_to_png(filename)
<mask token>
for the_file in os.listdir('photo_files'):
i += create_learn_base(the_file, 'rus', i)
fix_dir_bugs()
| <mask token>
pytesseract.tesseract_cmd = (
'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe')
config = '--oem 3 --psm'
def pdf_to_png(filename):
doc = fitz.open('pdf_files\\{}'.format(filename))
zoom = 4
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\\{}'.format(new_filename))
return new_filename
def create_learn_base(filename, language, i):
img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(
filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
img_to_crop = Image.open('photo_files\\{}'.format(filename))
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines():
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,
0, 255), 1)
area = x, height - h, w, height - y
cropped_img = img_to_crop.crop(area)
try:
if not os.path.exists('learn_data\\s_{}'.format(box[0])):
os.mkdir('learn_data\\s_{}'.format(box[0]))
cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0],
box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/' + the_dir):
try:
Image.open('learn_data/' + the_dir + '/' + the_file)
except OSError:
os.remove('learn_data/' + the_dir + '/' + the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
clear_directory('learn_data')
for the_file in os.listdir('pdf_files'):
filename = the_file
png_filename = pdf_to_png(filename)
i = 0
for the_file in os.listdir('photo_files'):
i += create_learn_base(the_file, 'rus', i)
fix_dir_bugs()
| import cv2, os, fitz, shutil
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from PIL import UnidentifiedImageError
pytesseract.tesseract_cmd = (
'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe')
config = '--oem 3 --psm'
def pdf_to_png(filename):
doc = fitz.open('pdf_files\\{}'.format(filename))
zoom = 4
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\\{}'.format(new_filename))
return new_filename
def create_learn_base(filename, language, i):
img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(
filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
img_to_crop = Image.open('photo_files\\{}'.format(filename))
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines():
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,
0, 255), 1)
area = x, height - h, w, height - y
cropped_img = img_to_crop.crop(area)
try:
if not os.path.exists('learn_data\\s_{}'.format(box[0])):
os.mkdir('learn_data\\s_{}'.format(box[0]))
cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0],
box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/' + the_dir):
try:
Image.open('learn_data/' + the_dir + '/' + the_file)
except OSError:
os.remove('learn_data/' + the_dir + '/' + the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
clear_directory('learn_data')
for the_file in os.listdir('pdf_files'):
filename = the_file
png_filename = pdf_to_png(filename)
i = 0
for the_file in os.listdir('photo_files'):
i += create_learn_base(the_file, 'rus', i)
fix_dir_bugs()
| import cv2, os, fitz, shutil
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from PIL import UnidentifiedImageError
pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'
config = r'--oem 3 --psm'
# Возвращает путь к картинке, созданной на основе 1 СТРАНИЦЫ pdf файла
# На входе требуется название pdf файла
def pdf_to_png(filename):
doc = fitz.open('pdf_files\{}'.format(filename))
zoom = 4 # zoom factor (влияет на качество получаемого из pdf изображения png)
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\{}'.format(new_filename))
return new_filename
# i в аргументах - номер итерации, чтобы вырезанных символов не пересекались
def create_learn_base(filename, language, i): # Создает папки с вырезанными распознанными символами в папке learn_data
# Открываем файлы с картинками
img_to_read = cv2.imdecode(np.fromfile('photo_files\{}'.format(filename), dtype=np.uint8),cv2.IMREAD_UNCHANGED) # МОДУЛЬ ДЛЯ ЧТЕНИЯ РУССКИХ ФАЙЛОВ #
img_to_crop = Image.open('photo_files\{}'.format(filename))
# Считываем текст с картинки в массив, если нужно - выводим
# words_in_image = pytesseract.image_to_string(img_to_read, lang=language)
# print(words_in_image)
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines(): # Вырезаем по очереди квадраты с символами
# Обрабатываем ошибки, возникающие при выходе за пределы картинки при обрезке
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0, 0, 255), 1)
area = (x, height - h, w, height - y) # Задаем область, содержащую вырезаемый символ
cropped_img = img_to_crop.crop(area)
try: # Обрабатываем ошибки, возникающие при неправильных именах файлов
if not os.path.exists('learn_data\s_{}'.format(box[0])):
os.mkdir('learn_data\s_{}'.format(box[0]))
cropped_img.save('learn_data\s_{}/{}_{}.PNG'.format(box[0], box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/'+the_dir):
try:
Image.open('learn_data/'+the_dir+'/'+the_file)
except OSError:
os.remove('learn_data/'+the_dir+'/'+the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
clear_directory('learn_data')
for the_file in os.listdir('pdf_files'):
filename = the_file
png_filename = pdf_to_png(filename)
i = 0
for the_file in os.listdir('photo_files'):
i += create_learn_base(the_file, 'rus', i)
fix_dir_bugs()
############# РУЧНАЯ ПРОВЕРКА #############
# Image.open('renamed_learn_data/26/C_591.PNG')
# fix_dir_bugs()
# try:
# Image.open('renamed_learn_data/26/C_591.PNG')
# except OSError:
# os.remove('renamed_learn_data/26/C_591.PNG') | [
4,
5,
6,
7,
8
] |
1,067 | bb208d40ce098b05594aaf9c579f64b909738d52 | #!/usr/bin/python
import os;
import math;
# Select which benchmark result set to analyse: uncomment exactly one
# chdir target (postgres / mysql / mongo run directories).
# os.chdir('data/postgres/linux.env')
os.chdir('data/mysql/linux.env')
# os.chdir('data/mongo/linux.env')
# Column indices of the raw result CSV layout
# (time, read ops, read errors, write ops, write errors).
col_time = 0;
col_read_ops = 1
col_read_err = 2
col_write_ops = 3
col_write_err = 4
class ColumnData:
    """One named series of numeric samples plus its summary statistics.

    Attributes:
        chart: owning chart object (stored as a back-reference only).
        title: series label used as the chart column header.
        data:  list of float samples.
        sum / avg / sd: total, arithmetic mean and population standard
            deviation of ``data``, computed once at construction time.
    """

    def __init__(self, chart, title, data):
        self.chart = chart
        self.title = title
        self.data = data
        self.sum = sum(self.data)
        self.avg = self.sum / len(self.data)
        # Population standard deviation (divide by N, not N-1).
        self.sd = math.sqrt(sum([math.pow(x - self.avg, 2) for x in data]) / len(self.data))

    def aggregate(self, group_size):
        """Return a new column averaging every ``group_size`` consecutive
        samples into one (smooths the series for plotting).

        The summary statistics (sum/avg/sd) of the returned column are
        deliberately copied from the *raw* series, so they keep describing
        the original data rather than the smoothed one.
        """
        assert len(self.data) % group_size == 0, 'data length must be a multiple of group_size'
        # Use explicit floor division: identical on Python 2, and keeps
        # range()/indexing working on Python 3 (plain "/" yields a float there).
        agg_data = [0.0 for i in range(len(self.data) // group_size)]
        for i in range(len(self.data)):
            agg_data[i // group_size] += self.data[i]
        agg_column = ColumnData(self.chart, self.title + '_agg', [x / group_size for x in agg_data])
        # Preserve the statistics of the un-aggregated series.
        agg_column.sum = self.sum
        agg_column.avg = self.avg
        agg_column.sd = self.sd
        return agg_column
class ChartData:
    """One benchmark run, loaded from a CSV result file.

    The file name encodes the run's parameters as comma-separated
    ``key=value`` pairs (e.g. ``r_lite=4,r_heavy=1,w_ins=2,...csv``); each
    key becomes an integer attribute on the instance.  The file body is a
    header row of quoted column titles followed by rows of float samples.
    """

    def __init__(self, file):
        assert file.endswith('.csv')
        # Parse the run metadata out of the file name: every "key=value"
        # field becomes an int attribute (r_lite, r_heavy, w_ins, ...).
        for field in file[:-len('.csv')].split(','):
            [key, value] = field.split('=')
            setattr(self, key, int(value))
        # Read the raw data: the first non-empty line holds the column
        # titles, the remaining lines hold float rows; transpose into one
        # list per column.  "with" guarantees the handle is closed (the
        # original version leaked it).
        first_line = True
        input_matrix = None
        for line in open(file, 'r'):
            line = line.strip()
            if line != '':  # "!=" replaces the Python-2-only "<>" operator
                items = line.split(',')
                if first_line:
                    input_matrix = [[title.replace("'", '')] for title in items]
                    first_line = False
                else:
                    values = [float(value) for value in items]
                    for i in range(len(values)):
                        input_matrix[i].append(values[i])
        self.columns = [ColumnData(self, input_column[0], input_column[1:]) for input_column in input_matrix]
        self.time_line = self.columns[0]
        # Columns 1/2 are read ops/errors, 3/4 write ops/errors; this matches
        # the col_* index constants at the top of the file.
        self.read_th = self.r_lite + self.r_heavy
        read_title = 'r%d_R%d' % (self.r_lite, self.r_heavy)
        self.read_ops = self.columns[1]
        self.read_ops.title = 'R_' + read_title
        self.read_err = self.columns[2]
        self.read_err.title = 'RE_' + read_title
        self.write_th = self.w_ins + self.w_up_tiny + self.w_up_wide
        write_title = 'i%d_u%d_U%d' % (self.w_ins, self.w_up_tiny, self.w_up_wide)
        self.write_ops = self.columns[3]
        self.write_ops.title = 'W_' + write_title
        self.write_err = self.columns[4]
        self.write_err.title = 'WE_' + write_title
name_index = 0;
def draw_chart(columns, name='', notes=''):
if name == '':
global name_index;
name_index += 1;
name = 'chart_%s' % name_index
id = 'chart_' + name;
result = "";
result += """
function %s() {
var data = google.visualization.arrayToDataTable([
""" % id;
result += '[%s],\n' % ', '.join(['"' + c.title + '"' for c in columns])
for i in range(len(columns[0].data)):
result += '[%s],\n' % (', '.join([str(c.data[i]) for c in columns]))
result += """
]);
var options = {
title: '%s',
//curveType: 'function',
chartArea:{left:60,top:10,width:'65%%',height:'85%%'}
};
var chart = new google.visualization.LineChart(document.getElementById('%s'));
chart.draw(data, options);
}
""" % (name, id);
return id, result
charts = []
def draw_aggregated_chart(name, columns, read_from=0, read_to=0, write_from=0, write_to=0):
read_chart = []
for file_csv in os.listdir('.'):
if file_csv.endswith('.csv'):
items = file_csv.replace('=', '_').replace('.', '_').split('_');
read_threads = int(items[4]);
write_threads = int(items[6]);
if read_from <= read_threads <= read_to and write_from <= write_threads <= write_to:
chart = read_chart_data(file_csv);
if len(read_chart) == 0:
read_chart = [[t] for t in extract_column(chart, col_time)];
for column in columns:
column_data = extract_column(chart, column)
if sum(column_data[1:]) == 0.0:
continue;
read_chart = append_column(read_chart, column_data);
return draw_chart(read_chart, name);
def meta_column(columns, title, metric):
return ColumnData(None, title, [metric(c) for c in columns])
def render_group(time_line, group_list, meta_prefix, threads_metric):
global c
charts.append(draw_chart([time_line] + [c.write_ops for c in group_list]));
charts.append(draw_chart([time_line.aggregate(10)] + [c.write_ops.aggregate(10) for c in group_list]));
charts.append(draw_chart([
meta_column([c.write_ops for c in group_list], meta_prefix + ' Threads', threads_metric),
meta_column([c.write_ops for c in group_list], meta_prefix + ' ops avg', lambda c: c.avg),
meta_column([c.write_ops for c in group_list], meta_prefix + ' ops sd', lambda c: c.sd),
]));
if True:
chart_list = []
for file_name in os.listdir('.'):
if file_name.endswith('.csv'):
chart_list.append(ChartData(file_name));
chart_ins_list = [c for c in chart_list if c.w_ins > 0 and c.read_th==0]
chart_up_tiny_list = [c for c in chart_list if c.w_up_tiny > 0 and c.read_th==0]
chart_up_wide_list = [c for c in chart_list if c.w_up_wide > 0 and c.read_th==0]
chart_r_lite_list = [c for c in chart_list if c.r_lite > 0 and c.write_th==0]
chart_r_heavy_list = [c for c in chart_list if c.r_heavy > 0 and c.write_th==0]
time_line = chart_list[0].time_line
if len(chart_ins_list)>0:
render_group(time_line, chart_ins_list, 'Write Ins', lambda c: c.chart.write_th)
if len(chart_up_tiny_list)>0:
render_group(time_line, chart_up_tiny_list, 'Write Up Tiny', lambda c: c.chart.write_th)
if len(chart_up_wide_list)>0:
render_group(time_line, chart_up_wide_list, 'Write Up Wide', lambda c: c.chart.write_th)
with open('report-all.html', 'w') as out:
out.write("""<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(function(){
""");
for id, renderer in charts:
out.write(" %s();\n" % id);
out.write("""
});
""");
for id, renderer in charts:
out.write(renderer);
out.write("""
</script>
</head>
<body>
""");
for id, renderer in charts:
out.write(' <div id="%s" style="width: 1200px; height: 400px;"></div>\n' % id)
out.write("""
</body>
</html>""");
| null | null | null | null | [
0
] |
1,068 | 84515ef6879b54b333f9afd48c6c4b7c43ff6957 | <mask token>
| class Solution(object):
<mask token>
| class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
t = triangle
if len(t) == 1:
return t[0][0]
ret = [0] * len(t)
ret[0] = t[0][0]
for i in range(1, len(t)):
for j in range(0, i + 1):
if j == 0:
old_v = ret[j]
ret[j] += t[i][j]
elif j == i:
ret[j] = old_v + t[i][j]
else:
val = min(old_v + t[i][j], ret[j] + t[i][j])
old_v = ret[j]
ret[j] = val
return min(ret)
| null | null | [
0,
1,
2
] |
1,069 | 1bbadf02c4b9ca22a0099bcc09fa4c62c9901c39 | <mask token>
class Styles(models.Model):
<mask token>
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
| <mask token>
class Pyeong(models.Model):
<mask token>
<mask token>
<mask token>
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
| <mask token>
class Comments(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
user = models.ForeignKey('members.Users', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=
self.post.pk, username=self.user.username)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
unique_together = ('post', 'user'),
class Pyeong(models.Model):
type = models.CharField('평 수', max_length=20)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range(len(index_list)):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
| <mask token>
class Posts(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@staticmethod
def initial_setting():
Pyeong.make_pyeng()
Colors.make_color()
HousingTypes.make_housing_type()
Styles.make_style()
class Meta:
verbose_name = '게시글'
verbose_name_plural = '게시글 목록'
def __str__(self):
return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=
'포스트', related_name='comment_set', related_query_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
content = models.TextField('댓글 내용', max_length=500)
created_at = models.DateTimeField('작성 날', auto_now_add=True)
updated_at = models.DateTimeField('수정 날짜', auto_now=True)
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
user = models.ForeignKey('members.Users', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=
self.post.pk, username=self.user.username)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
unique_together = ('post', 'user'),
class Pyeong(models.Model):
type = models.CharField('평 수', max_length=20)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range(len(index_list)):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
| from django.conf import settings
from django.db import models
def get_image_filename(instance, filename):
a = f'post_images/{instance.post.title}.svg'
return a
def get_main_image_filename(instance, filename):
a = f'post_images/{instance.title}_main.svg'
return a
# Create your models here.
class Posts(models.Model):
PYEONG_CHOICE_FIELD = (
('1-7', '1-7평'),
('8-15', '8-15평'),
('16-25', '16-25평'),
('26-', '그 이상'),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
title = models.TextField(
'제목', max_length=50
)
content = models.TextField(
'작성 글', max_length=500
)
main_image = models.ImageField(
upload_to=get_main_image_filename,
blank=True,
null=True,
verbose_name='메인 이미지',
)
pyeong = models.ManyToManyField(
'Pyeong',
blank=True,
)
created_at = models.DateTimeField(
'생성 날짜', auto_now_add=True,
)
updated_at = models.DateTimeField(
verbose_name='수정 날짜', auto_now=True, null=True, blank=True
)
like_users = models.ManyToManyField(
'members.Users',
through='PostLike',
related_name='like_posts',
related_query_name='like_post',
blank=True,
)
colors = models.ManyToManyField(
'posts.Colors',
blank=True,
)
housingtype = models.ManyToManyField(
'HousingTypes',
blank=True,
)
style = models.ManyToManyField(
'Styles',
blank=True,
)
postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)
@staticmethod
def initial_setting():
Pyeong.make_pyeng()
Colors.make_color()
HousingTypes.make_housing_type()
Styles.make_style()
class Meta:
verbose_name = '게시글'
verbose_name_plural = '게시글 목록'
def __str__(self):
return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
verbose_name='포스트',
related_name='comment_set',
related_query_name='comments',
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
content = models.TextField(
'댓글 내용', max_length=500
)
# 글쓴이
created_at = models.DateTimeField(
'작성 날', auto_now_add=True,
)
updated_at = models.DateTimeField(
'수정 날짜', auto_now=True,
)
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
# 여기서 이미지 처리를 하게 될 듯
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
user = models.ForeignKey(
'members.Users',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(
post_pk=self.post.pk,
username=self.user.username,
)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
# 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.
unique_together = (
('post', 'user'),
)
class Pyeong(models.Model):
type = models.CharField(
'평 수',
max_length=20,
)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range((len(index_list))):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField(
'주거 환경',
max_length=20,
)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField(
'디자인 스타일',
max_length=10,
)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField(
'색상',
max_length=10
)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
image = models.ImageField(
upload_to=get_image_filename,
verbose_name='다중 이미지',
)
image_comment = models.TextField(
'사진 설명', max_length=200, blank=True, null=True,
)
# 이미지 추가 스택오버플로우 정보
# https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django
| [
9,
15,
23,
27,
32
] |
1,070 | 1ea71f7b17809189eeacf19a6b7c4c7d88a5022c | <mask token>
| <mask token>
def make_id2class(args):
if args.dataset == 'caltech101':
return caltech.id2class
| <mask token>
def make_data_loader(args, **kwargs):
if args.dataset == 'caltech101':
train_set = caltech.caltech101Classification(args, split='train')
val_set = caltech.caltech101Classification(args, split='val')
test_set = caltech.caltech101Classification(args, split='test')
num_classes = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size,
shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size,
shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size,
shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_classes
elif args.dataset == 'embedding':
dataset = embedding.Embedding(args)
num_classes = dataset.NUM_CLASSES
loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=
False, **kwargs)
return loader, loader, loader, num_classes
else:
print('Dataloader for {} is not implemented'.format(args.dataset))
raise NotImplementedError
def make_id2class(args):
if args.dataset == 'caltech101':
return caltech.id2class
| from dataloaders.datasets import caltech, embedding
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'caltech101':
train_set = caltech.caltech101Classification(args, split='train')
val_set = caltech.caltech101Classification(args, split='val')
test_set = caltech.caltech101Classification(args, split='test')
num_classes = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size,
shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size,
shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size,
shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_classes
elif args.dataset == 'embedding':
dataset = embedding.Embedding(args)
num_classes = dataset.NUM_CLASSES
loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=
False, **kwargs)
return loader, loader, loader, num_classes
else:
print('Dataloader for {} is not implemented'.format(args.dataset))
raise NotImplementedError
def make_id2class(args):
if args.dataset == 'caltech101':
return caltech.id2class
| from dataloaders.datasets import caltech, embedding
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'caltech101':
train_set = caltech.caltech101Classification(args, split='train')
val_set = caltech.caltech101Classification(args, split='val')
test_set = caltech.caltech101Classification(args, split='test')
num_classes = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_classes
elif args.dataset == 'embedding':
dataset = embedding.Embedding(args)
num_classes = dataset.NUM_CLASSES
loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
return loader, loader, loader, num_classes
else:
print("Dataloader for {} is not implemented".format(args.dataset))
raise NotImplementedError
def make_id2class(args):
if args.dataset == 'caltech101':
return caltech.id2class
| [
0,
1,
2,
3,
4
] |
1,071 | 2ca1b603b18316bc1d970b5e32389e10e4b532e2 | import configure
import connectify
import userlog
import dirlog
import time
def getUser(sock):
try:
userinfo = userlog.getInfo()
except:
userinfo = configure.init(sock)
userinfo = userinfo.split('^')[0]
# print userinfo
return userinfo
if __name__=="__main__":
sock = connectify.createCon()
userinfo = getUser(sock)
while 1:
dirs, flag = dirlog.getDirs()
if flag:
sock.send('2'+userinfo+'^'+dirs)
print sock.recv(1024)
sock.send('3'+userinfo)
update_count = sock.recv(1024)
update = []
for x in range(0,int(update_count)):
sock.send('4'+userinfo)
update.append(sock.recv(1024))
print update
time.sleep(2)
connectify.closeCon(sock)
| null | null | null | null | [
0
] |
1,072 | 07544d1eb039da0081716aa489fc1a0a5a200145 | <mask token>
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
<mask token>
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
<mask token>
| <mask token>
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
| <mask token>
db = PostgresqlDatabase('contacts', user='postgres', password='', host=
'localhost', port=5432)
intro_question = input(
'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '
)
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
| from peewee import *
db = PostgresqlDatabase('contacts', user='postgres', password='', host=
'localhost', port=5432)
intro_question = input(
'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '
)
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + ' ' + contact.lastname + ' ' + contact.
phone + ' ' + contact.email + ' ' + contact.address)
def create_contact():
contact_firstname = input('Enter First Name: ')
contact_lastname = input('Enter Last Name: ')
contact_phone = input('Enter Phone Number: ')
contact_email = input('Enter Email: ')
contact_address = input('Enter Address: ')
newcontact = Contact(firstname=contact_firstname, lastname=
contact_lastname, phone=contact_phone, email=contact_email, address
=contact_address)
newcontact.save()
print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +
newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)
def update_contact():
update_find_by_firstname = input(
'Enter the First Name of the contact you want to update: ')
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input('Enter the new number: ')
updated_info.phone = new_phone
new_email = input('Enter new Email: ')
updated_info.email = new_email
new_address = input('Enter new Address: ')
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input(
'Enter First Name of the contact you want to find: ')
find_by_firstname = Contact.get(Contact.firstname ==
find_contact_by_firstname)
print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +
' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +
find_by_firstname.address)
def delete_contact():
contact_name_delete = input(
'Enter First Name of the contact you want to delete: ')
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == 'Create':
create_contact()
elif intro_question == 'Read':
read_contact()
elif intro_question == 'Delete':
delete_contact()
elif intro_question == 'Find':
find_contact()
elif intro_question == 'Update':
update_contact()
| from peewee import *
db = PostgresqlDatabase('contacts', user='postgres', password='',
host='localhost', port=5432)
intro_question = input("What would you like to do with Contacts? Create? Read? Find? Delete? Update? ")
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + " " + contact.lastname + " " + contact.phone + " " + contact.email + " " + contact.address)
def create_contact():
contact_firstname = input("Enter First Name: ")
contact_lastname = input("Enter Last Name: ")
contact_phone = input("Enter Phone Number: ")
contact_email = input("Enter Email: ")
contact_address = input("Enter Address: ")
newcontact = Contact(firstname = contact_firstname, lastname = contact_lastname, phone = contact_phone, email = contact_email, address = contact_address)
newcontact.save()
print(newcontact.firstname + " " + newcontact.lastname + " " + newcontact.phone + " " + newcontact.email + " " + newcontact.address)
def update_contact():
update_find_by_firstname = input("Enter the First Name of the contact you want to update: ")
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input("Enter the new number: ")
updated_info.phone = new_phone
new_email = input("Enter new Email: ")
updated_info.email = new_email
new_address = input("Enter new Address: ")
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input("Enter First Name of the contact you want to find: ")
find_by_firstname = Contact.get(Contact.firstname == find_contact_by_firstname)
print(find_by_firstname.firstname + " " + find_by_firstname.lastname + " " + find_by_firstname.phone + " " + find_by_firstname.email + " " + find_by_firstname.address)
def delete_contact():
contact_name_delete = input("Enter First Name of the contact you want to delete: ")
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == "Create":
create_contact()
elif intro_question == "Read":
read_contact()
elif intro_question == "Delete":
delete_contact()
elif intro_question == "Find":
find_contact()
elif intro_question == "Update":
update_contact() | [
7,
9,
10,
11,
12
] |
1,073 | 289aa48b4433be533c3916dd039136df45e0ac0b | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('student', '0008_studentbasic_stu_class_num')]
operations = [migrations.AlterModelOptions(name='onduty', options={
'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),
migrations.AlterModelOptions(name='studentbasic', options={
'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),
migrations.AlterModelOptions(name='studentcertification', options={
'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations
.AlterModelOptions(name='studentclass', options={'verbose_name':
'学员班级', 'verbose_name_plural': '学员班级'}), migrations.
AlterModelOptions(name='studentexam', options={'verbose_name':
'学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.
AlterModelOptions(name='studentexamextra', options={'verbose_name':
'学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.
AlterModelOptions(name='studenttextbook', options={'verbose_name':
'学员教材', 'verbose_name_plural': '学员教材'}), migrations.
AlterModelOptions(name='studentwechat', options={'verbose_name':
'学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.
AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',
'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name
='studentbasic', name='stu_signup_date', field=models.CharField(
blank=True, max_length=128, null=True, verbose_name='报名日期')),
migrations.AlterField(model_name='studentcertification', name=
'cert_date', field=models.CharField(blank=True, max_length=128,
null=True, verbose_name='发证日期')), migrations.AlterField(model_name=
'studentexam', name='exam_date', field=models.CharField(blank=True,
max_length=128, null=True, verbose_name='报考日期')), migrations.
AlterField(model_name='studentexamextra', name='exam_date', field=
models.CharField(blank=True, max_length=128, null=True,
verbose_name='报考日期')), migrations.AlterField(model_name=
'studentwechat', name='wechat_date', field=models.CharField(blank=
True, max_length=128, null=True, verbose_name='开通日期')), migrations.
AlterField(model_name='tuition', name='fee_date', field=models.
CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('student', '0008_studentbasic_stu_class_num')]
operations = [migrations.AlterModelOptions(name='onduty', options={
'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),
migrations.AlterModelOptions(name='studentbasic', options={
'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),
migrations.AlterModelOptions(name='studentcertification', options={
'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations
.AlterModelOptions(name='studentclass', options={'verbose_name':
'学员班级', 'verbose_name_plural': '学员班级'}), migrations.
AlterModelOptions(name='studentexam', options={'verbose_name':
'学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.
AlterModelOptions(name='studentexamextra', options={'verbose_name':
'学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.
AlterModelOptions(name='studenttextbook', options={'verbose_name':
'学员教材', 'verbose_name_plural': '学员教材'}), migrations.
AlterModelOptions(name='studentwechat', options={'verbose_name':
'学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.
AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',
'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name
='studentbasic', name='stu_signup_date', field=models.CharField(
blank=True, max_length=128, null=True, verbose_name='报名日期')),
migrations.AlterField(model_name='studentcertification', name=
'cert_date', field=models.CharField(blank=True, max_length=128,
null=True, verbose_name='发证日期')), migrations.AlterField(model_name=
'studentexam', name='exam_date', field=models.CharField(blank=True,
max_length=128, null=True, verbose_name='报考日期')), migrations.
AlterField(model_name='studentexamextra', name='exam_date', field=
models.CharField(blank=True, max_length=128, null=True,
verbose_name='报考日期')), migrations.AlterField(model_name=
'studentwechat', name='wechat_date', field=models.CharField(blank=
True, max_length=128, null=True, verbose_name='开通日期')), migrations.
AlterField(model_name='tuition', name='fee_date', field=models.
CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]
| # Generated by Django 2.2.5 on 2019-10-24 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0008_studentbasic_stu_class_num'),
]
operations = [
migrations.AlterModelOptions(
name='onduty',
options={'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'},
),
migrations.AlterModelOptions(
name='studentbasic',
options={'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'},
),
migrations.AlterModelOptions(
name='studentcertification',
options={'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'},
),
migrations.AlterModelOptions(
name='studentclass',
options={'verbose_name': '学员班级', 'verbose_name_plural': '学员班级'},
),
migrations.AlterModelOptions(
name='studentexam',
options={'verbose_name': '学员考试信息', 'verbose_name_plural': '学员考试信息'},
),
migrations.AlterModelOptions(
name='studentexamextra',
options={'verbose_name': '学员补考情况', 'verbose_name_plural': '学员补考情况'},
),
migrations.AlterModelOptions(
name='studenttextbook',
options={'verbose_name': '学员教材', 'verbose_name_plural': '学员教材'},
),
migrations.AlterModelOptions(
name='studentwechat',
options={'verbose_name': '学员365开通情况', 'verbose_name_plural': '学员365开通情况'},
),
migrations.AlterModelOptions(
name='tuition',
options={'verbose_name': '学员交费信息', 'verbose_name_plural': '学员交费信息'},
),
migrations.AlterField(
model_name='studentbasic',
name='stu_signup_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报名日期'),
),
migrations.AlterField(
model_name='studentcertification',
name='cert_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='发证日期'),
),
migrations.AlterField(
model_name='studentexam',
name='exam_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
),
migrations.AlterField(
model_name='studentexamextra',
name='exam_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
),
migrations.AlterField(
model_name='studentwechat',
name='wechat_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='开通日期'),
),
migrations.AlterField(
model_name='tuition',
name='fee_date',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'),
),
]
| [
0,
1,
2,
3,
4
] |
1,074 | 03062ea08bd6ad88376f7c2aa2c89d2194ed8b2e | <mask token>
| <mask token>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
<mask token>
| <mask token>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
<mask token>
| <mask token>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
if __name__ == '__main__':
main()
| '''
fibonacci(6) => [1, 1, 2, 3, 5, 8]
fibonacci(7) => [1, 1, 2, 3, 5, 8, 13]
'''
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n-1)
suma = lista[len(lista)-1] + lista[len(lista)-2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
if __name__ == '__main__':
main()
| [
0,
1,
2,
3,
4
] |
1,075 | af668751074df6f182c7121821587270734ea5af | <mask token>
class NovelsSpider(scrapy.Spider):
<mask token>
<mask token>
<mask token>
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
<mask token>
| <mask token>
class NovelsSpider(scrapy.Spider):
<mask token>
<mask token>
<mask token>
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
| <mask token>
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
| import scrapy
import os
from topdb.items import BiqugeItem
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
| # -*- coding: utf-8 -*-
import scrapy
import os
from topdb.items import BiqugeItem
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
# 小说分类
path = '/Users/qx/Documents/小说/new/'
all=response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname=oneitem.xpath('./h2/text()').extract_first()
if classname=='奇幻小说、玄幻小说大全列表':
classname='xuanhuan'
if classname=='历史小说、军事小说、穿越小说大全列表':
classname='chuanyue'
if classname=='武侠小说、仙侠小说、修真小说大全列表':
classname='xiuzhen'
if classname=='言情小说、都市小说大全列表':
classname='dushi'
if classname=='异灵小说、科幻小说大全列表':
classname='kehuan'
if classname=='游戏小说、竞技小说、网游小说大全列表':
classname='wangyou'
urls=oneitem.xpath('./ul/li/a/@href').extract()
names=oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url=urls[i]
name=names[i]
yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)
def url_parse(self, response):
# 小说章节列表
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author=author.split(':',1)[1]
print(name+'-'+author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()").extract()
for i in range(len(listurls)):
url = "http://www.xbiquge.la" + listurls[i]
chaptername=chapternames[i]
oldname=path+ classname+'/'+name+ '-作者:' + author
newname=path+ classname+'/'+name
if (os.path.exists(oldname)):
os.rename(oldname,newname)
if (not os.path.exists(newname)):
os.makedirs(newname)
if(not os.path.exists(newname+'/'+ str(i) + ".txt")):
yield scrapy.Request(url, meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)
def detail_parse(self, response):
# 章节详细内容
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = "\n".join(novel).replace(" ", " ")
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
# print(item['classname'])
# print(item['name'])
# print(item['title'])
# print('\n')
yield item
# 这里是爬取整个网站且按照分类进行爬取 但是重点是 爬取太慢scrapy 是异步操作 还需要了解一下多线程的问题 这样速度能更快些
| [
3,
4,
5,
6,
7
] |
1,076 | 06f961c07695d1c312cb943afbfa64508a709c7e | <mask token>
| <mask token>
with alive_bar(100) as bar:
for i in range(100):
sleep(0.03)
bar()
with alive_bar(200, bar='bubbles', spinner='notes2') as bar:
for i in range(200):
sleep(0.03)
bar()
| from alive_progress import alive_bar
from time import sleep
with alive_bar(100) as bar:
for i in range(100):
sleep(0.03)
bar()
with alive_bar(200, bar='bubbles', spinner='notes2') as bar:
for i in range(200):
sleep(0.03)
bar()
| from alive_progress import alive_bar
from time import sleep
with alive_bar(100) as bar: # default setting
for i in range(100):
sleep(0.03)
bar() # call after consuming one item
# using bubble bar and notes spinner
with alive_bar(200, bar='bubbles', spinner='notes2') as bar:
for i in range(200):
sleep(0.03)
bar() # call after consuming one item
| null | [
0,
1,
2,
3
] |
1,077 | 1fafbc1e415b5089afcd2976d4f0dc2aa1c5a144 | def maxProduct(self, A):
size= len(A)
if size==1:
return A[0]
Max=[A[0]]
Min=[A[0]]
for i in range(1,size):
Max.append(max(max(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))
Min.append(min(min(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))
tmax=Max[0]
for i in range(0,size):
if Max[i]>tmax:
tmax=Max[i]
return tmax
| null | null | null | null | [
0
] |
1,078 | 9a6ceeb286bb6c3d5923fe3b53be90a097e16ef5 | <mask token>
def convert_lines_to_arrays(content):
"""
convert each line in scene to an array of text
formating when not relevant
"""
lines = []
for x in content:
line = x.strip()
if len(line) > 0:
if 'scene:' in x:
lines.append([])
else:
line_arr = format_line(line)
if line_arr:
lines.append(line_arr)
return lines
def format_line(line):
"""
format the line before storing as an array
"""
line = line.lower()
line_arr = []
open_brack = []
is_dialogue = False
word = ''
for s in line:
if s == "'":
continue
if s == ':':
is_dialogue = True
continue
if s == '[':
open_brack.append(s)
continue
if s == ']':
open_brack.pop()
continue
if is_dialogue and not open_brack:
if s == ' ':
if len(word) > 0:
line_arr.append(word)
word = ''
elif re.match('[.,?;!"]', s):
if len(word) > 0:
line_arr.append(word)
line_arr.append(s)
word = ''
elif re.match('[A-Za-z\\-]', s):
word = word + s
return line_arr
def line2TrainVector(pairs, word_dict):
"""
Input: Read pairs of lines:
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
word_dict is embedding hash formed with processDict() below
Output: convert into fasttext embedding vectors (dim 300)
above example returns
matrix size 4 x 300 for input
matrix size 7 x 300 for target
"""
input_v = createWordVector(pairs[0], word_dict)
target_v = createWordVector(pairs[1], word_dict)
return input_v, target_v
def createWordVector(word_array, word_dict):
vect = []
for word in word_array:
if '-' in word and word not in word_dict:
vect.extend(createHypenEmbed(word))
continue
if word == ';':
word = '.'
if word == '':
continue
if word in word_dict:
vect.append(word_dict[word])
else:
print('NOT IN DICT')
print(word)
editted_word = editWord(word, word_dict)
vect.append(editted_word)
print(editted_word)
print(word_array)
return vect
def editWord(weird_word, word_dict):
last_s = ''
weird_stack = []
for i, s in enumerate(weird_word):
if s != last_s:
weird_stack.append([i, s, 1])
else:
weird_stack[-1][2] += 1
last_s = s
sorted_stack = sorted(weird_stack, key=lambda x: x[2])
most_common = sorted_stack[-1]
common_idx = most_common[0] + most_common[2]
weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]
if weird_word in word_dict:
return weird_word
else:
weird_word = editWord(weird_word, word_dict)
return weird_word
<mask token>
class fastDict:
def __init__(self, read_filename, method):
self.method = method
print(method)
if method == 'store':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
print(read_filename)
self.fast = ft.load_model(os.path.expanduser(read_filename))
pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
self.pickle_path = os.path.expanduser(pickle_filename)
print(pickle_filename)
def processDict(self):
if self.method == 'store':
self.writeWordDict()
return self.loadWordDict()
def loadWordDict(self):
pickle_reader = open(self.pickle_path, 'rb')
word_vec = pk.load(pickle_reader)
return word_vec
def writeWordDict(self):
all_words = self.getAllWords()
self.createWordDict(all_words)
def getAllWords(self):
all_the_words = self.fast.get_words()
return all_the_words
def createWordDict(self, all_words):
pickle_writer = open(self.pickle_path, 'wb')
word_dict = {}
for word in all_words:
word_dict[word] = self.fast.get_word_vector(word)
pk.dump(word_dict, pickle_writer)
<mask token>
| <mask token>
def createLinePairs(corpus):
"""
Input: Read episode linse with format:
ELAINE Hi Mr . Seinfeld !
JERRY Hey , theres the old man !
Output: convert those pairs into array
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
"""
print('Reading lines...')
with open(corpus) as f:
content = f.readlines()
print('CONTENT')
print(content)
lines = convert_lines_to_arrays(content)
pairs = []
for i, x in enumerate(lines[:-1]):
if lines[i] and lines[i + 1]:
pairs.append([lines[i], lines[i + 1]])
return pairs
def convert_lines_to_arrays(content):
"""
convert each line in scene to an array of text
formating when not relevant
"""
lines = []
for x in content:
line = x.strip()
if len(line) > 0:
if 'scene:' in x:
lines.append([])
else:
line_arr = format_line(line)
if line_arr:
lines.append(line_arr)
return lines
def format_line(line):
"""
format the line before storing as an array
"""
line = line.lower()
line_arr = []
open_brack = []
is_dialogue = False
word = ''
for s in line:
if s == "'":
continue
if s == ':':
is_dialogue = True
continue
if s == '[':
open_brack.append(s)
continue
if s == ']':
open_brack.pop()
continue
if is_dialogue and not open_brack:
if s == ' ':
if len(word) > 0:
line_arr.append(word)
word = ''
elif re.match('[.,?;!"]', s):
if len(word) > 0:
line_arr.append(word)
line_arr.append(s)
word = ''
elif re.match('[A-Za-z\\-]', s):
word = word + s
return line_arr
def line2TrainVector(pairs, word_dict):
"""
Input: Read pairs of lines:
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
word_dict is embedding hash formed with processDict() below
Output: convert into fasttext embedding vectors (dim 300)
above example returns
matrix size 4 x 300 for input
matrix size 7 x 300 for target
"""
input_v = createWordVector(pairs[0], word_dict)
target_v = createWordVector(pairs[1], word_dict)
return input_v, target_v
def createWordVector(word_array, word_dict):
vect = []
for word in word_array:
if '-' in word and word not in word_dict:
vect.extend(createHypenEmbed(word))
continue
if word == ';':
word = '.'
if word == '':
continue
if word in word_dict:
vect.append(word_dict[word])
else:
print('NOT IN DICT')
print(word)
editted_word = editWord(word, word_dict)
vect.append(editted_word)
print(editted_word)
print(word_array)
return vect
def editWord(weird_word, word_dict):
last_s = ''
weird_stack = []
for i, s in enumerate(weird_word):
if s != last_s:
weird_stack.append([i, s, 1])
else:
weird_stack[-1][2] += 1
last_s = s
sorted_stack = sorted(weird_stack, key=lambda x: x[2])
most_common = sorted_stack[-1]
common_idx = most_common[0] + most_common[2]
weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]
if weird_word in word_dict:
return weird_word
else:
weird_word = editWord(weird_word, word_dict)
return weird_word
<mask token>
class fastDict:
def __init__(self, read_filename, method):
self.method = method
print(method)
if method == 'store':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
print(read_filename)
self.fast = ft.load_model(os.path.expanduser(read_filename))
pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
self.pickle_path = os.path.expanduser(pickle_filename)
print(pickle_filename)
def processDict(self):
if self.method == 'store':
self.writeWordDict()
return self.loadWordDict()
def loadWordDict(self):
pickle_reader = open(self.pickle_path, 'rb')
word_vec = pk.load(pickle_reader)
return word_vec
def writeWordDict(self):
all_words = self.getAllWords()
self.createWordDict(all_words)
def getAllWords(self):
all_the_words = self.fast.get_words()
return all_the_words
def createWordDict(self, all_words):
pickle_writer = open(self.pickle_path, 'wb')
word_dict = {}
for word in all_words:
word_dict[word] = self.fast.get_word_vector(word)
pk.dump(word_dict, pickle_writer)
<mask token>
| <mask token>
def createLinePairs(corpus):
"""
Input: Read episode linse with format:
ELAINE Hi Mr . Seinfeld !
JERRY Hey , theres the old man !
Output: convert those pairs into array
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
"""
print('Reading lines...')
with open(corpus) as f:
content = f.readlines()
print('CONTENT')
print(content)
lines = convert_lines_to_arrays(content)
pairs = []
for i, x in enumerate(lines[:-1]):
if lines[i] and lines[i + 1]:
pairs.append([lines[i], lines[i + 1]])
return pairs
def convert_lines_to_arrays(content):
"""
convert each line in scene to an array of text
formating when not relevant
"""
lines = []
for x in content:
line = x.strip()
if len(line) > 0:
if 'scene:' in x:
lines.append([])
else:
line_arr = format_line(line)
if line_arr:
lines.append(line_arr)
return lines
def format_line(line):
"""
format the line before storing as an array
"""
line = line.lower()
line_arr = []
open_brack = []
is_dialogue = False
word = ''
for s in line:
if s == "'":
continue
if s == ':':
is_dialogue = True
continue
if s == '[':
open_brack.append(s)
continue
if s == ']':
open_brack.pop()
continue
if is_dialogue and not open_brack:
if s == ' ':
if len(word) > 0:
line_arr.append(word)
word = ''
elif re.match('[.,?;!"]', s):
if len(word) > 0:
line_arr.append(word)
line_arr.append(s)
word = ''
elif re.match('[A-Za-z\\-]', s):
word = word + s
return line_arr
def line2TrainVector(pairs, word_dict):
"""
Input: Read pairs of lines:
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
word_dict is embedding hash formed with processDict() below
Output: convert into fasttext embedding vectors (dim 300)
above example returns
matrix size 4 x 300 for input
matrix size 7 x 300 for target
"""
input_v = createWordVector(pairs[0], word_dict)
target_v = createWordVector(pairs[1], word_dict)
return input_v, target_v
def createWordVector(word_array, word_dict):
vect = []
for word in word_array:
if '-' in word and word not in word_dict:
vect.extend(createHypenEmbed(word))
continue
if word == ';':
word = '.'
if word == '':
continue
if word in word_dict:
vect.append(word_dict[word])
else:
print('NOT IN DICT')
print(word)
editted_word = editWord(word, word_dict)
vect.append(editted_word)
print(editted_word)
print(word_array)
return vect
def editWord(weird_word, word_dict):
last_s = ''
weird_stack = []
for i, s in enumerate(weird_word):
if s != last_s:
weird_stack.append([i, s, 1])
else:
weird_stack[-1][2] += 1
last_s = s
sorted_stack = sorted(weird_stack, key=lambda x: x[2])
most_common = sorted_stack[-1]
common_idx = most_common[0] + most_common[2]
weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]
if weird_word in word_dict:
return weird_word
else:
weird_word = editWord(weird_word, word_dict)
return weird_word
def createHypenEmbed(word):
"""
Handle outlier language with hyphen
"""
word_whole = re.sub('-', '', word)
if word_whole in word_dict:
return [word_dict[word_whole]]
else:
subwords = word.split('-')
word_vect = [word_dict[subwords[0]]]
for w in subwords[1:]:
word_vect.append(word_dict[w])
return word_vect
class fastDict:
def __init__(self, read_filename, method):
self.method = method
print(method)
if method == 'store':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
print(read_filename)
self.fast = ft.load_model(os.path.expanduser(read_filename))
pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
self.pickle_path = os.path.expanduser(pickle_filename)
print(pickle_filename)
def processDict(self):
if self.method == 'store':
self.writeWordDict()
return self.loadWordDict()
def loadWordDict(self):
pickle_reader = open(self.pickle_path, 'rb')
word_vec = pk.load(pickle_reader)
return word_vec
def writeWordDict(self):
all_words = self.getAllWords()
self.createWordDict(all_words)
def getAllWords(self):
all_the_words = self.fast.get_words()
return all_the_words
def createWordDict(self, all_words):
pickle_writer = open(self.pickle_path, 'wb')
word_dict = {}
for word in all_words:
word_dict[word] = self.fast.get_word_vector(word)
pk.dump(word_dict, pickle_writer)
if __name__ == '__main__':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
method = 'import'
fast = fastDict(read_filename, method)
word_dict = fast.processDict()
test_filename = (
'~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy')
pairs = createLinePairs(os.path.expanduser(test_filename))
| <mask token>
import fastText as ft
import pickle as pk
import os
import re
import pdb
def createLinePairs(corpus):
"""
Input: Read episode linse with format:
ELAINE Hi Mr . Seinfeld !
JERRY Hey , theres the old man !
Output: convert those pairs into array
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
"""
print('Reading lines...')
with open(corpus) as f:
content = f.readlines()
print('CONTENT')
print(content)
lines = convert_lines_to_arrays(content)
pairs = []
for i, x in enumerate(lines[:-1]):
if lines[i] and lines[i + 1]:
pairs.append([lines[i], lines[i + 1]])
return pairs
def convert_lines_to_arrays(content):
"""
convert each line in scene to an array of text
formating when not relevant
"""
lines = []
for x in content:
line = x.strip()
if len(line) > 0:
if 'scene:' in x:
lines.append([])
else:
line_arr = format_line(line)
if line_arr:
lines.append(line_arr)
return lines
def format_line(line):
"""
format the line before storing as an array
"""
line = line.lower()
line_arr = []
open_brack = []
is_dialogue = False
word = ''
for s in line:
if s == "'":
continue
if s == ':':
is_dialogue = True
continue
if s == '[':
open_brack.append(s)
continue
if s == ']':
open_brack.pop()
continue
if is_dialogue and not open_brack:
if s == ' ':
if len(word) > 0:
line_arr.append(word)
word = ''
elif re.match('[.,?;!"]', s):
if len(word) > 0:
line_arr.append(word)
line_arr.append(s)
word = ''
elif re.match('[A-Za-z\\-]', s):
word = word + s
return line_arr
def line2TrainVector(pairs, word_dict):
"""
Input: Read pairs of lines:
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
word_dict is embedding hash formed with processDict() below
Output: convert into fasttext embedding vectors (dim 300)
above example returns
matrix size 4 x 300 for input
matrix size 7 x 300 for target
"""
input_v = createWordVector(pairs[0], word_dict)
target_v = createWordVector(pairs[1], word_dict)
return input_v, target_v
def createWordVector(word_array, word_dict):
vect = []
for word in word_array:
if '-' in word and word not in word_dict:
vect.extend(createHypenEmbed(word))
continue
if word == ';':
word = '.'
if word == '':
continue
if word in word_dict:
vect.append(word_dict[word])
else:
print('NOT IN DICT')
print(word)
editted_word = editWord(word, word_dict)
vect.append(editted_word)
print(editted_word)
print(word_array)
return vect
def editWord(weird_word, word_dict):
last_s = ''
weird_stack = []
for i, s in enumerate(weird_word):
if s != last_s:
weird_stack.append([i, s, 1])
else:
weird_stack[-1][2] += 1
last_s = s
sorted_stack = sorted(weird_stack, key=lambda x: x[2])
most_common = sorted_stack[-1]
common_idx = most_common[0] + most_common[2]
weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]
if weird_word in word_dict:
return weird_word
else:
weird_word = editWord(weird_word, word_dict)
return weird_word
def createHypenEmbed(word):
"""
Handle outlier language with hyphen
"""
word_whole = re.sub('-', '', word)
if word_whole in word_dict:
return [word_dict[word_whole]]
else:
subwords = word.split('-')
word_vect = [word_dict[subwords[0]]]
for w in subwords[1:]:
word_vect.append(word_dict[w])
return word_vect
class fastDict:
def __init__(self, read_filename, method):
self.method = method
print(method)
if method == 'store':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
print(read_filename)
self.fast = ft.load_model(os.path.expanduser(read_filename))
pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
self.pickle_path = os.path.expanduser(pickle_filename)
print(pickle_filename)
def processDict(self):
if self.method == 'store':
self.writeWordDict()
return self.loadWordDict()
def loadWordDict(self):
pickle_reader = open(self.pickle_path, 'rb')
word_vec = pk.load(pickle_reader)
return word_vec
def writeWordDict(self):
all_words = self.getAllWords()
self.createWordDict(all_words)
def getAllWords(self):
all_the_words = self.fast.get_words()
return all_the_words
def createWordDict(self, all_words):
pickle_writer = open(self.pickle_path, 'wb')
word_dict = {}
for word in all_words:
word_dict[word] = self.fast.get_word_vector(word)
pk.dump(word_dict, pickle_writer)
if __name__ == '__main__':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
method = 'import'
fast = fastDict(read_filename, method)
word_dict = fast.processDict()
test_filename = (
'~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy')
pairs = createLinePairs(os.path.expanduser(test_filename))
| '''
Create a dictionary of fasttext embedding, stored locally
fasttext import. This will hopefully make it easier to load
and train data.
This will also be used to store the
Steps to clean scripts (codify):
1) copy direct from website (space-delimited text)
2) remove actions in brackets
3) change words not in fasttext dictionary like "heeeey" to closest approximation like "heeey", and convert made-up conjuction like "overdie" to "over-die"
4) concate the speaker into one string, without space
5) create a space between punctuation and words [.,?;!]
6) delete apostrophes for shorten words like "it's"
'''
import fastText as ft
import pickle as pk
import os
import re
import pdb
def createLinePairs(corpus):
'''
Input: Read episode linse with format:
ELAINE Hi Mr . Seinfeld !
JERRY Hey , theres the old man !
Output: convert those pairs into array
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
'''
print("Reading lines...")
# combine every two lines into pairs of vectors
with open(corpus) as f:
content = f.readlines()
print('CONTENT')
print(content)
# strip \n and \t, and skip the speaker
lines = convert_lines_to_arrays(content)
pairs = []
for i,x in enumerate(lines[:-1]):
# create pairs of lines to feed as input and output
# for model, empty lines represent new scene
# so any pair wiht an empty line is discarded
if lines[i] and lines[i+1]: #if neither lines are empty
pairs.append([lines[i], lines[i+1]])
return pairs
def convert_lines_to_arrays(content):
'''
convert each line in scene to an array of text
formating when not relevant
'''
lines = []
for x in content:
line = x.strip()
if len(line)>0: #skip empty lines
if 'scene:' in x: # store empty arrays for new scene
lines.append([])
else:
line_arr = format_line(line)
if line_arr: # if line not empty
lines.append(line_arr)
return lines
def format_line(line):
'''
format the line before storing as an array
'''
line = line.lower() # set line to lower case
line_arr = []
open_brack = []
is_dialogue = False
word = ''
for s in line:
if s=="'": # don't store apostrophe, so it's stored as its
continue
if s==':': # after first speaker identified
is_dialogue = True
continue
if s=='[': #if open_brack is not null, string is not dialogue
open_brack.append(s)
continue
if s==']': #remove open brack, if closed one found
open_brack.pop()
continue
if is_dialogue and not open_brack:
# if not inside bracket and some word to store
if s == ' ': # if space
if len(word)>0:
line_arr.append(word)
word = '' # reset word to blank
elif re.match("[.,?;!\"]", s):
# start new word if character
if len(word)>0:
line_arr.append(word)
line_arr.append(s)
word = ''
elif re.match("[A-Za-z\-]", s):
# store if alpha character
word = word+s
return line_arr
def line2TrainVector(pairs, word_dict):
# [TODO] convert each line into vectors
# don't need to use target lens when not batched
'''
Input: Read pairs of lines:
[["Hi", "Mr", ".", "Seinfeld", "!"],
["Hey", ",", "theres", "the", "old","man","!"]
word_dict is embedding hash formed with processDict() below
Output: convert into fasttext embedding vectors (dim 300)
above example returns
matrix size 4 x 300 for input
matrix size 7 x 300 for target
'''
input_v = createWordVector(pairs[0], word_dict)
target_v = createWordVector(pairs[1], word_dict)
return input_v, target_v
def createWordVector(word_array, word_dict):
vect = []
for word in word_array:
# a hyphenated word may be tricky
# if cannot find, then may need to split up
# as 2 word
if '-' in word and word not in word_dict:
vect.extend(createHypenEmbed(word))
continue
# semi-colons not in fasttext
if word == ';': word = '.'
if word == '':
continue
if word in word_dict:
vect.append(word_dict[word])
else:
print('NOT IN DICT')
print(word)
editted_word = editWord(word, word_dict)
vect.append(editted_word)
print(editted_word)
print(word_array)
return vect
def editWord(weird_word, word_dict):
# edit weird string, remove extra letters
# until word in dict
last_s = ''
weird_stack = []
for i, s in enumerate(weird_word):
## create ([index, letter, num]) for each different letter
if s!=last_s:
weird_stack.append([i, s, 1])
else:
weird_stack[-1][2]+=1 # add 1 to the weird word
last_s = s
# sort the stack to find most common group of letters and index
sorted_stack = sorted(weird_stack, key = lambda x: x[2])
most_common = sorted_stack[-1]
# remove most common letter in the weird word
# i.e. in heeeeey, remove e
common_idx = most_common[0]+most_common[2]
weird_word = weird_word[:(common_idx-1)]+weird_word[common_idx:]
if weird_word in word_dict:
return weird_word
else:
weird_word = editWord(weird_word, word_dict)
return weird_word
def createHypenEmbed(word):
'''
Handle outlier language with hyphen
'''
word_whole = re.sub('-', '', word)
if word_whole in word_dict:
return [word_dict[word_whole]]
else:
# [TODO] should the hyphenated word be
# split into two words or kept as an
# average embedding?
# currently adding the two word into one vect
subwords = word.split('-')
word_vect = [word_dict[subwords[0]]]
for w in subwords[1:]:
word_vect.append(word_dict[w])
return word_vect
class fastDict():
def __init__(self, read_filename, method):
# [TODO] allow dynamically init
self.method = method
print(method)
if method == 'store':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
print(read_filename)
self.fast = ft.load_model(
os.path.expanduser(read_filename))
pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
self.pickle_path = os.path.expanduser(pickle_filename)
print(pickle_filename)
def processDict(self):
# method = store or import
# read pickle dictionary
# if method = store, convert fastText data to pickle format first
if self.method == 'store':
self.writeWordDict()
return self.loadWordDict()
def loadWordDict(self):
pickle_reader = open(self.pickle_path, 'rb')
word_vec = pk.load(pickle_reader)
return word_vec
def writeWordDict(self):
all_words = self.getAllWords()
self.createWordDict(all_words)
def getAllWords(self):
all_the_words = self.fast.get_words()
return all_the_words
def createWordDict(self, all_words):
pickle_writer = open(self.pickle_path, 'wb')
word_dict = {}
for word in all_words:
word_dict[word] = self.fast.get_word_vector(word)
pk.dump(word_dict, pickle_writer)
if __name__ == '__main__':
read_filename = '~/FastData/wiki.en/wiki.en.bin'
method = 'import'
fast = fastDict(read_filename, method)
word_dict = fast.processDict()
# [TODO] clean-up do not need to call these functions in main
test_filename = '~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy'
pairs = createLinePairs(os.path.expanduser(test_filename))
# [TODO] transfer this into
# for pair in pairs: input, output = line2TrainVector(pair, word_dict)
| [
12,
13,
15,
16,
17
] |
1,079 | 618aa64c08ebf8d9a0bc9662195ece2bbd485c17 | <mask token>
| <mask token>
try:
print(dic[55])
except Exception as err:
print('Mensagem: ', err)
| dic = {}
try:
print(dic[55])
except Exception as err:
print('Mensagem: ', err)
| null | null | [
0,
1,
2
] |
1,080 | 8f5d9918260e2f50fb229a7067f820a186101b99 | <mask token>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
<mask token>
def y(self):
return intensities
<mask token>
| <mask token>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<mask token>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
| <mask token>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<mask token>
def _filterRidgeLines(maximaArray, rowMax, colMax):
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
for i, row in enumerate(maximaArray):
ridge = []
colPos = row[1]
rowPos = row[0]
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:, :]):
if nextRows[0] == rowPos:
continue
if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -
nextRows[0]) <= rowMax:
ridge.append((rowPos, colPos, nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
<mask token>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
| <mask token>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<mask token>
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size, dtype=(float, 3))
count = 0
for j, row in enumerate(CWTArray):
for i, element in enumerate(row):
try:
if element > row[i - 1] and element > row[i + 1]:
maximas[count] = steps - j, i, element
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
<mask token>
def _filterRidgeLines(maximaArray, rowMax, colMax):
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
for i, row in enumerate(maximaArray):
ridge = []
colPos = row[1]
rowPos = row[0]
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:, :]):
if nextRows[0] == rowPos:
continue
if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -
nextRows[0]) <= rowMax:
ridge.append((rowPos, colPos, nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
<mask token>
def getPeakInfo(ridgeLines, data, waveletCoeff):
peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (
'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',
'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])
for i, lines in enumerate(ridgeLines):
maximum = np.argmax(zip(*lines)[2])
peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2
], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[
maximum][1]]
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
<mask token>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
| import numpy as np
from scipy import stats
from scipy import interpolate
from math import factorial
from scipy import signal
"""
A continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,
it should work for most datasets.
Parameters
----------
lowerBound: The lowest value of the scale factor to use in the wavelet transform
upperBound: The highest value of the scale factor to use in the wavelet transform
steps: The number of scale factors we want between the highest and lowest bounds
rowWindow: The maximum number of rows that a ridge line can be discontinuous before it is
terminated. I.e. the maximum number of scale factors it can deviate.
colWindow: The maximum number of columns that a ridge line can wander before it is terminated.
I.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.
"""
# CWT Transform parameters
lowerBound = 1
upperBound = 70
steps = 90
# Ridge line filtering parameters
rowWindow = 2
columnWindow = 5
class _spectra:
def __init__(self,x,y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
"""
Simple helper function for finding all of the maxima in the 2D array returned by the wavelet
transform. Works on the basis of a simple comparison between neighbouring elements. These
values form the initial basis for the ridge lines.
"""
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size,dtype=(float,3))
# Populate the maxima array with a tuple of the coordinates and the values of the maxima
count = 0
for j,row in enumerate(CWTArray):
for i,element in enumerate(row):
try:
if element > row[i-1] and element > row[i+1]:
maximas[count]= ((steps-j,i,element))
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
"""
Filter the ridge lines found from the maxima of the CWT coefficient array based on a set
parameters, namely the maximum deviations in wavenumber and scale space. Any lines which are
found from this criteria are considered to be peaks and further evaluated in the following
steps.
"""
def _filterRidgeLines(maximaArray,rowMax,colMax):
# Helper to prevent duplicating ridge lines
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
# Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)
for i,row in enumerate(maximaArray):
ridge = [] # For each maxima start a ridge line
colPos = row[1] # Get the column position of the current maxima
rowPos = row[0] # Get the row position of the current maxima
# If this value is already part of another ridge line, move to the next value
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima
if nextRows[0] == rowPos: # If the scale factors are the same, skip
continue
if np.abs(colPos - nextRows[1]) <= colMax and \
np.abs(rowPos - nextRows[0]) <= rowMax:
ridge.append((rowPos,colPos,nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
# If the ridge lines run all the way to the lowest scale factors, add them to the list
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
"""
For each of the ridge lines found from the filtered CWT array, determine the other
characteristics of the peaks.
The position of the peak is determined from the position of the maxima in the ridge
line.
"""
def getPeakInfo(ridgeLines,data,waveletCoeff):
# For each of the ridge lines we have found, locate the positions of the maxima. These
# correspond to the peak centers.
peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\
('cwtCoeff','f'),('SNR','f'),('length','uint8'),\
('intensity','f'),('wavenumber','f')])
# For each of the ridge lines, add the position of the peak center and the length of the
# line. These are useful for filtering peaks later.
for i,lines in enumerate(ridgeLines):
# Find the index of the maximum CWT coefficient. This is the peak center.
maximum = np.argmax(zip(*lines)[2])
peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\
data.x[lines[maximum][1]],data.y[lines[maximum][1]]
# Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is
# defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
"""
Processes spectral data and returns a structured array of peak information. Peak can then be
filtered based on ridge line length, signal to noise ratio and scale values.
"""
def getPeaks(waveNumbers,intensities):
data = _spectra(waveNumbers,intensities)
# Take the CWT of the spectra. Trim the result to remove padding.
waveletCoeff = signal.cwt(intensities, signal.ricker, \
np.linspace(lowerBound,upperBound,steps))
# Flip the matrix so the highest wavelet coefficient is the top row
waveletCoeff = np.flipud(waveletCoeff)
# Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
# takes a (scaleFactor,3) array of positions and values of maxima.
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)
# Populate a structured array with peak information
peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)
return peakInfo
| [
3,
5,
6,
8,
11
] |
1,081 | e51c0d8c6430603d989d55a64fdf77f9e1a2397b | <mask token>
class TestExampleIO(BaseTestIO, unittest.TestCase):
<mask token>
<mask token>
<mask token>
<mask token>
def tearDown(self) ->None:
super().tearDown()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).unlink(missing_ok=True)
class Specific_TestExampleIO(unittest.TestCase):
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(lazy=True)
for ana in seg.analogsignals:
assert isinstance(ana, AnalogSignalProxy)
ana = ana.load()
assert isinstance(ana, AnalogSignal)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrainProxy)
st = st.load()
assert isinstance(st, SpikeTrain)
seg = r.read_segment(lazy=False)
for anasig in seg.analogsignals:
assert isinstance(ana, AnalogSignal)
self.assertNotEqual(anasig.size, 0)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrain)
self.assertNotEqual(st.size, 0)
assert 'seg_extra_info' in seg.annotations
assert seg.name == 'Seg #0 Block #0'
for anasig in seg.analogsignals:
assert anasig.name is not None
for st in seg.spiketrains:
assert st.name is not None
for ev in seg.events:
assert ev.name is not None
for ep in seg.epochs:
assert ep.name is not None
def test_read_block(self):
r = ExampleIO(filename=None)
bl = r.read_block(lazy=True)
def test_read_segment_with_time_slice(self):
r = ExampleIO(filename=None)
seg = r.read_segment(time_slice=None)
shape_full = seg.analogsignals[0].shape
spikes_full = seg.spiketrains[0]
event_full = seg.events[0]
t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
seg = r.read_segment(time_slice=(t_start, t_stop))
shape_slice = seg.analogsignals[0].shape
spikes_slice = seg.spiketrains[0]
event_slice = seg.events[0]
assert shape_full[0] > shape_slice[0]
assert spikes_full.size > spikes_slice.size
assert np.all(spikes_slice >= t_start)
assert np.all(spikes_slice <= t_stop)
assert spikes_slice.t_start == t_start
assert spikes_slice.t_stop == t_stop
assert event_full.size > event_slice.size
assert np.all(event_slice.times >= t_start)
assert np.all(event_slice.times <= t_stop)
<mask token>
| <mask token>
class TestExampleIO(BaseTestIO, unittest.TestCase):
<mask token>
<mask token>
<mask token>
def setUp(self):
super().setUp()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).touch()
def tearDown(self) ->None:
super().tearDown()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).unlink(missing_ok=True)
class Specific_TestExampleIO(unittest.TestCase):
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(lazy=True)
for ana in seg.analogsignals:
assert isinstance(ana, AnalogSignalProxy)
ana = ana.load()
assert isinstance(ana, AnalogSignal)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrainProxy)
st = st.load()
assert isinstance(st, SpikeTrain)
seg = r.read_segment(lazy=False)
for anasig in seg.analogsignals:
assert isinstance(ana, AnalogSignal)
self.assertNotEqual(anasig.size, 0)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrain)
self.assertNotEqual(st.size, 0)
assert 'seg_extra_info' in seg.annotations
assert seg.name == 'Seg #0 Block #0'
for anasig in seg.analogsignals:
assert anasig.name is not None
for st in seg.spiketrains:
assert st.name is not None
for ev in seg.events:
assert ev.name is not None
for ep in seg.epochs:
assert ep.name is not None
def test_read_block(self):
r = ExampleIO(filename=None)
bl = r.read_block(lazy=True)
def test_read_segment_with_time_slice(self):
r = ExampleIO(filename=None)
seg = r.read_segment(time_slice=None)
shape_full = seg.analogsignals[0].shape
spikes_full = seg.spiketrains[0]
event_full = seg.events[0]
t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
seg = r.read_segment(time_slice=(t_start, t_stop))
shape_slice = seg.analogsignals[0].shape
spikes_slice = seg.spiketrains[0]
event_slice = seg.events[0]
assert shape_full[0] > shape_slice[0]
assert spikes_full.size > spikes_slice.size
assert np.all(spikes_slice >= t_start)
assert np.all(spikes_slice <= t_stop)
assert spikes_slice.t_start == t_start
assert spikes_slice.t_stop == t_stop
assert event_full.size > event_slice.size
assert np.all(event_slice.times >= t_start)
assert np.all(event_slice.times <= t_stop)
<mask token>
| <mask token>
class TestExampleIO(BaseTestIO, unittest.TestCase):
ioclass = ExampleIO
entities_to_download = []
entities_to_test = ['fake1.fake', 'fake2.fake']
def setUp(self):
super().setUp()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).touch()
def tearDown(self) ->None:
super().tearDown()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).unlink(missing_ok=True)
class Specific_TestExampleIO(unittest.TestCase):
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(lazy=True)
for ana in seg.analogsignals:
assert isinstance(ana, AnalogSignalProxy)
ana = ana.load()
assert isinstance(ana, AnalogSignal)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrainProxy)
st = st.load()
assert isinstance(st, SpikeTrain)
seg = r.read_segment(lazy=False)
for anasig in seg.analogsignals:
assert isinstance(ana, AnalogSignal)
self.assertNotEqual(anasig.size, 0)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrain)
self.assertNotEqual(st.size, 0)
assert 'seg_extra_info' in seg.annotations
assert seg.name == 'Seg #0 Block #0'
for anasig in seg.analogsignals:
assert anasig.name is not None
for st in seg.spiketrains:
assert st.name is not None
for ev in seg.events:
assert ev.name is not None
for ep in seg.epochs:
assert ep.name is not None
def test_read_block(self):
r = ExampleIO(filename=None)
bl = r.read_block(lazy=True)
def test_read_segment_with_time_slice(self):
r = ExampleIO(filename=None)
seg = r.read_segment(time_slice=None)
shape_full = seg.analogsignals[0].shape
spikes_full = seg.spiketrains[0]
event_full = seg.events[0]
t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
seg = r.read_segment(time_slice=(t_start, t_stop))
shape_slice = seg.analogsignals[0].shape
spikes_slice = seg.spiketrains[0]
event_slice = seg.events[0]
assert shape_full[0] > shape_slice[0]
assert spikes_full.size > spikes_slice.size
assert np.all(spikes_slice >= t_start)
assert np.all(spikes_slice <= t_stop)
assert spikes_slice.t_start == t_start
assert spikes_slice.t_stop == t_stop
assert event_full.size > event_slice.size
assert np.all(event_slice.times >= t_start)
assert np.all(event_slice.times <= t_stop)
<mask token>
| <mask token>
import pathlib
import unittest
from neo.io.exampleio import ExampleIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
from neo.io.proxyobjects import AnalogSignalProxy, SpikeTrainProxy, EventProxy, EpochProxy
from neo import AnalogSignal, SpikeTrain
import quantities as pq
import numpy as np
class TestExampleIO(BaseTestIO, unittest.TestCase):
ioclass = ExampleIO
entities_to_download = []
entities_to_test = ['fake1.fake', 'fake2.fake']
def setUp(self):
super().setUp()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).touch()
def tearDown(self) ->None:
super().tearDown()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=
entity, directory=self.local_test_dir)
pathlib.Path(full_path).unlink(missing_ok=True)
class Specific_TestExampleIO(unittest.TestCase):
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(lazy=True)
for ana in seg.analogsignals:
assert isinstance(ana, AnalogSignalProxy)
ana = ana.load()
assert isinstance(ana, AnalogSignal)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrainProxy)
st = st.load()
assert isinstance(st, SpikeTrain)
seg = r.read_segment(lazy=False)
for anasig in seg.analogsignals:
assert isinstance(ana, AnalogSignal)
self.assertNotEqual(anasig.size, 0)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrain)
self.assertNotEqual(st.size, 0)
assert 'seg_extra_info' in seg.annotations
assert seg.name == 'Seg #0 Block #0'
for anasig in seg.analogsignals:
assert anasig.name is not None
for st in seg.spiketrains:
assert st.name is not None
for ev in seg.events:
assert ev.name is not None
for ep in seg.epochs:
assert ep.name is not None
def test_read_block(self):
r = ExampleIO(filename=None)
bl = r.read_block(lazy=True)
def test_read_segment_with_time_slice(self):
r = ExampleIO(filename=None)
seg = r.read_segment(time_slice=None)
shape_full = seg.analogsignals[0].shape
spikes_full = seg.spiketrains[0]
event_full = seg.events[0]
t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
seg = r.read_segment(time_slice=(t_start, t_stop))
shape_slice = seg.analogsignals[0].shape
spikes_slice = seg.spiketrains[0]
event_slice = seg.events[0]
assert shape_full[0] > shape_slice[0]
assert spikes_full.size > spikes_slice.size
assert np.all(spikes_slice >= t_start)
assert np.all(spikes_slice <= t_stop)
assert spikes_slice.t_start == t_start
assert spikes_slice.t_stop == t_stop
assert event_full.size > event_slice.size
assert np.all(event_slice.times >= t_start)
assert np.all(event_slice.times <= t_stop)
if __name__ == '__main__':
unittest.main()
| """
Tests of neo.io.exampleio
"""
import pathlib
import unittest
from neo.io.exampleio import ExampleIO # , HAVE_SCIPY
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
from neo.io.proxyobjects import (AnalogSignalProxy,
SpikeTrainProxy, EventProxy, EpochProxy)
from neo import (AnalogSignal, SpikeTrain)
import quantities as pq
import numpy as np
# This run standart tests, this is mandatory for all IO
class TestExampleIO(BaseTestIO, unittest.TestCase, ):
ioclass = ExampleIO
entities_to_download = []
entities_to_test = [
'fake1.fake',
'fake2.fake',
]
def setUp(self):
super().setUp()
# ensure fake test files exist before running common tests
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=entity,
directory=self.local_test_dir)
pathlib.Path(full_path).touch()
def tearDown(self) -> None:
super().tearDown()
for entity in self.entities_to_test:
full_path = get_test_file_full_path(self.ioclass, filename=entity,
directory=self.local_test_dir)
pathlib.Path(full_path).unlink(missing_ok=True)
# This is the minimal variables that are required
# to run the common IO tests. IO specific tests
# can be added here and will be run automatically
# in addition to the common tests.
class Specific_TestExampleIO(unittest.TestCase):
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(lazy=True)
for ana in seg.analogsignals:
assert isinstance(ana, AnalogSignalProxy)
ana = ana.load()
assert isinstance(ana, AnalogSignal)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrainProxy)
st = st.load()
assert isinstance(st, SpikeTrain)
seg = r.read_segment(lazy=False)
for anasig in seg.analogsignals:
assert isinstance(ana, AnalogSignal)
self.assertNotEqual(anasig.size, 0)
for st in seg.spiketrains:
assert isinstance(st, SpikeTrain)
self.assertNotEqual(st.size, 0)
# annotations
assert 'seg_extra_info' in seg.annotations
assert seg.name == 'Seg #0 Block #0'
for anasig in seg.analogsignals:
assert anasig.name is not None
for st in seg.spiketrains:
assert st.name is not None
for ev in seg.events:
assert ev.name is not None
for ep in seg.epochs:
assert ep.name is not None
def test_read_block(self):
r = ExampleIO(filename=None)
bl = r.read_block(lazy=True)
#assert len(bl.list_units) == 3
#assert len(bl.channel_indexes) == 1 + 1 # signals grouped + units grouped
def test_read_segment_with_time_slice(self):
r = ExampleIO(filename=None)
seg = r.read_segment(time_slice=None)
shape_full = seg.analogsignals[0].shape
spikes_full = seg.spiketrains[0]
event_full = seg.events[0]
t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
seg = r.read_segment(time_slice=(t_start, t_stop))
shape_slice = seg.analogsignals[0].shape
spikes_slice = seg.spiketrains[0]
event_slice = seg.events[0]
assert shape_full[0] > shape_slice[0]
assert spikes_full.size > spikes_slice.size
assert np.all(spikes_slice >= t_start)
assert np.all(spikes_slice <= t_stop)
assert spikes_slice.t_start == t_start
assert spikes_slice.t_stop == t_stop
assert event_full.size > event_slice.size
assert np.all(event_slice.times >= t_start)
assert np.all(event_slice.times <= t_stop)
if __name__ == "__main__":
unittest.main()
| [
6,
7,
8,
10,
11
] |
1,082 | 57b51ea36e9e2a095cf7e9646db2cc400cc72b83 | <mask token>
| <mask token>
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
if runBatch:
fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,
'distributed': True, 'verbose': False, 'searchSize': 1,
'batchRun': True}
variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],
'ordersPerWeek': [1, 5, 20, 40, 80, 120]}
batch_run = BatchRunner(MASArchitecture, variable_params,
fixed_params, iterations=10, max_steps=800, model_reporters={
'Utilisation': metrics.machineUtilisation, 'CompleteOrders':
metrics.ordersComplete, 'AverageOrderWaitTime': metrics.
averageOrderWaitTime, 'TotalMessagesSent': metrics.
totalMessagesSent, 'AverageMessagesSent': metrics.
averageMessagesSent, 'SuccessfulOrders': metrics.
successfulOrders, 'noProposalOrders': metrics.noProposalOrders,
'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':
metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,
'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
'MaxMessagesReceivedFactory': metrics.
maxMessagesReceivedByFactory, 'AverageSatisfactionScore':
metrics.averageSatisfactionScore,
'AverageSuccessfulSatisfactionScore': metrics.
averageSuccessfulSatisfactionScore,
'CheapOrdersWithCheapMachines': metrics.
cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':
metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':
metrics.averageSuccessfulOrderPrice,
'AverageSuccessfulOrderPriceCheap': metrics.
averageSuccessfulOrderPriceCheap,
'AverageSuccessfulOrderPriceNeutral': metrics.
averageSuccessfulOrderPriceNeutral,
'AverageSuccessfulOrderPriceAsap': metrics.
averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':
metrics.averageSuccessfulOrderMakeSpan,
'AverageSuccessfulOrderMakespanCheap': metrics.
averageSuccessfulOrderMakespanCheap,
'AverageSuccessfulOrderMakespanNeutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'AverageSuccessfulOrderMakespanAsap': metrics.
averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':
metrics.percentageOfSuccessfulASAPOrders,
'SuccessfulCheapOrders': metrics.
percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':
metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={
'id': 'unique_id'})
batch_run.run_all()
model_data = batch_run.get_model_vars_dataframe()
agent_data = batch_run.get_agent_vars_dataframe()
if saveResults:
number = 0
while os.path.exists('{}/results/test_{}'.format(dir_path, number)
) == True:
number += 1
os.makedirs('{}/results/test_{}'.format(dir_path, number))
model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format
(dir_path, number))
agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format
(dir_path, number))
else:
grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)
chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],
data_collector_name='datacollector')
chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'
}], data_collector_name='datacollector')
chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':
'Red'}], data_collector_name='datacollector')
chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':
'Red'}], data_collector_name='datacollector')
averageMessagesSentChart = ChartModule([{'Label':
'Average Messages Sent', 'Color': 'Red'}], data_collector_name=
'datacollector')
chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':
'Green'}], data_collector_name='datacollector')
chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':
'Blue'}], data_collector_name='datacollector')
chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],
data_collector_name='datacollector')
chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart10 = ChartModule([{'Label': 'Max Messages Received - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart13 = ChartModule([{'Label': 'Average satisfaction score',
'Color': 'Blue'}], data_collector_name='datacollector')
chart14 = ChartModule([{'Label':
'Average successful satisfaction score', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart15 = ChartModule([{'Label':
'% Cheap orders with cheap machines', 'Color': 'Green'}],
data_collector_name='datacollector')
chart16 = ChartModule([{'Label': '% Asap orders with fast machines',
'Color': 'Green'}], data_collector_name='datacollector')
chart17 = ChartModule([{'Label': 'Average successful price',
'Color': 'Blue'}], data_collector_name='datacollector')
chart18 = ChartModule([{'Label': 'Average successful price Cheap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart19 = ChartModule([{'Label': 'Average successful price Neutral',
'Color': 'Blue'}], data_collector_name='datacollector')
chart20 = ChartModule([{'Label': 'Average successful price Asap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart21 = ChartModule([{'Label': 'Average successful makespan',
'Color': 'Green'}], data_collector_name='datacollector')
chart22 = ChartModule([{'Label':
'Average successful makespan Cheap', 'Color': 'Green'}],
data_collector_name='datacollector')
chart23 = ChartModule([{'Label':
'Average successful makespan Neutral', 'Color': 'Green'}],
data_collector_name='datacollector')
chart24 = ChartModule([{'Label': 'Average successful makespan Asap',
'Color': 'Green'}], data_collector_name='datacollector')
chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
chart26 = ChartModule([{'Label': 'Successful Neutral Orders',
'Color': 'Red'}], data_collector_name='datacollector')
chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
noProposalOrdersChart = ChartModule([{'Label':
'Orders that received no proposals', 'Color': 'Red'}],
data_collector_name='datacollector')
server = ModularServer(MASArchitecture, [grid, chart, chart2,
chart3, chart4, averageMessagesSentChart, chart5,
noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,
chart11, chart12, chart13, chart14, chart15, chart16, chart17,
chart18, chart19, chart20, chart21, chart22, chart23, chart24,
chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,
'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':
1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek':
40, 'model_reporters_dict': {'Utilisation': metrics.
machineUtilisation, 'Complete Orders': metrics.ordersComplete,
'Average Order Wait Time': metrics.averageOrderWaitTime,
'Successful Orders': metrics.successfulOrders,
'Total Messages Sent': metrics.totalMessagesSent,
'Average Messages Sent': metrics.averageMessagesSent,
'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.
totalWIPSize, 'Max Messages Sent - Order': metrics.
maxMessagesSentFromOrder, 'Max Messages Received - Order':
metrics.maxMessagesReceivedByOrder,
'Max Messages Sent - Factory': metrics.
maxMessagesSentFromFactory, 'Max Messages Received - Factory':
metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':
metrics.outsourcedOrders, 'Orders that received no proposals':
metrics.noProposalOrders,
'Average successful satisfaction score': metrics.
averageSuccessfulSatisfactionScore,
'Average satisfaction score': metrics.averageSatisfactionScore,
'% Cheap orders with cheap machines': metrics.
cheapOrdersWithCheapMachines,
'% Asap orders with fast machines': metrics.
asapOrdersWithFastMachines, 'Average successful price': metrics
.averageSuccessfulOrderPrice, 'Average successful price Cheap':
metrics.averageSuccessfulOrderPriceCheap,
'Average successful price Neutral': metrics.
averageSuccessfulOrderPriceNeutral,
'Average successful price Asap': metrics.
averageSuccessfulOrderPriceAsap, 'Average successful makespan':
metrics.averageSuccessfulOrderMakeSpan,
'Average successful makespan Cheap': metrics.
averageSuccessfulOrderMakespanCheap,
'Average successful makespan Neutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'Average successful makespan Asap': metrics.
averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':
metrics.percentageOfSuccessfulASAPOrders,
'Successful Neutral Orders': metrics.
percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':
metrics.percentageOfSuccessfulNeutralOrders}})
server.port = 8521
server.launch()
| <mask token>
runBatch = True
architecture = 'Inter-Firm'
saveResults = True
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
if runBatch:
fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,
'distributed': True, 'verbose': False, 'searchSize': 1,
'batchRun': True}
variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],
'ordersPerWeek': [1, 5, 20, 40, 80, 120]}
batch_run = BatchRunner(MASArchitecture, variable_params,
fixed_params, iterations=10, max_steps=800, model_reporters={
'Utilisation': metrics.machineUtilisation, 'CompleteOrders':
metrics.ordersComplete, 'AverageOrderWaitTime': metrics.
averageOrderWaitTime, 'TotalMessagesSent': metrics.
totalMessagesSent, 'AverageMessagesSent': metrics.
averageMessagesSent, 'SuccessfulOrders': metrics.
successfulOrders, 'noProposalOrders': metrics.noProposalOrders,
'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':
metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,
'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
'MaxMessagesReceivedFactory': metrics.
maxMessagesReceivedByFactory, 'AverageSatisfactionScore':
metrics.averageSatisfactionScore,
'AverageSuccessfulSatisfactionScore': metrics.
averageSuccessfulSatisfactionScore,
'CheapOrdersWithCheapMachines': metrics.
cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':
metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':
metrics.averageSuccessfulOrderPrice,
'AverageSuccessfulOrderPriceCheap': metrics.
averageSuccessfulOrderPriceCheap,
'AverageSuccessfulOrderPriceNeutral': metrics.
averageSuccessfulOrderPriceNeutral,
'AverageSuccessfulOrderPriceAsap': metrics.
averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':
metrics.averageSuccessfulOrderMakeSpan,
'AverageSuccessfulOrderMakespanCheap': metrics.
averageSuccessfulOrderMakespanCheap,
'AverageSuccessfulOrderMakespanNeutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'AverageSuccessfulOrderMakespanAsap': metrics.
averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':
metrics.percentageOfSuccessfulASAPOrders,
'SuccessfulCheapOrders': metrics.
percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':
metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={
'id': 'unique_id'})
batch_run.run_all()
model_data = batch_run.get_model_vars_dataframe()
agent_data = batch_run.get_agent_vars_dataframe()
if saveResults:
number = 0
while os.path.exists('{}/results/test_{}'.format(dir_path, number)
) == True:
number += 1
os.makedirs('{}/results/test_{}'.format(dir_path, number))
model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format
(dir_path, number))
agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format
(dir_path, number))
else:
grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)
chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],
data_collector_name='datacollector')
chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'
}], data_collector_name='datacollector')
chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':
'Red'}], data_collector_name='datacollector')
chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':
'Red'}], data_collector_name='datacollector')
averageMessagesSentChart = ChartModule([{'Label':
'Average Messages Sent', 'Color': 'Red'}], data_collector_name=
'datacollector')
chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':
'Green'}], data_collector_name='datacollector')
chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':
'Blue'}], data_collector_name='datacollector')
chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],
data_collector_name='datacollector')
chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart10 = ChartModule([{'Label': 'Max Messages Received - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart13 = ChartModule([{'Label': 'Average satisfaction score',
'Color': 'Blue'}], data_collector_name='datacollector')
chart14 = ChartModule([{'Label':
'Average successful satisfaction score', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart15 = ChartModule([{'Label':
'% Cheap orders with cheap machines', 'Color': 'Green'}],
data_collector_name='datacollector')
chart16 = ChartModule([{'Label': '% Asap orders with fast machines',
'Color': 'Green'}], data_collector_name='datacollector')
chart17 = ChartModule([{'Label': 'Average successful price',
'Color': 'Blue'}], data_collector_name='datacollector')
chart18 = ChartModule([{'Label': 'Average successful price Cheap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart19 = ChartModule([{'Label': 'Average successful price Neutral',
'Color': 'Blue'}], data_collector_name='datacollector')
chart20 = ChartModule([{'Label': 'Average successful price Asap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart21 = ChartModule([{'Label': 'Average successful makespan',
'Color': 'Green'}], data_collector_name='datacollector')
chart22 = ChartModule([{'Label':
'Average successful makespan Cheap', 'Color': 'Green'}],
data_collector_name='datacollector')
chart23 = ChartModule([{'Label':
'Average successful makespan Neutral', 'Color': 'Green'}],
data_collector_name='datacollector')
chart24 = ChartModule([{'Label': 'Average successful makespan Asap',
'Color': 'Green'}], data_collector_name='datacollector')
chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
chart26 = ChartModule([{'Label': 'Successful Neutral Orders',
'Color': 'Red'}], data_collector_name='datacollector')
chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
noProposalOrdersChart = ChartModule([{'Label':
'Orders that received no proposals', 'Color': 'Red'}],
data_collector_name='datacollector')
server = ModularServer(MASArchitecture, [grid, chart, chart2,
chart3, chart4, averageMessagesSentChart, chart5,
noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,
chart11, chart12, chart13, chart14, chart15, chart16, chart17,
chart18, chart19, chart20, chart21, chart22, chart23, chart24,
chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,
'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':
1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek':
40, 'model_reporters_dict': {'Utilisation': metrics.
machineUtilisation, 'Complete Orders': metrics.ordersComplete,
'Average Order Wait Time': metrics.averageOrderWaitTime,
'Successful Orders': metrics.successfulOrders,
'Total Messages Sent': metrics.totalMessagesSent,
'Average Messages Sent': metrics.averageMessagesSent,
'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.
totalWIPSize, 'Max Messages Sent - Order': metrics.
maxMessagesSentFromOrder, 'Max Messages Received - Order':
metrics.maxMessagesReceivedByOrder,
'Max Messages Sent - Factory': metrics.
maxMessagesSentFromFactory, 'Max Messages Received - Factory':
metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':
metrics.outsourcedOrders, 'Orders that received no proposals':
metrics.noProposalOrders,
'Average successful satisfaction score': metrics.
averageSuccessfulSatisfactionScore,
'Average satisfaction score': metrics.averageSatisfactionScore,
'% Cheap orders with cheap machines': metrics.
cheapOrdersWithCheapMachines,
'% Asap orders with fast machines': metrics.
asapOrdersWithFastMachines, 'Average successful price': metrics
.averageSuccessfulOrderPrice, 'Average successful price Cheap':
metrics.averageSuccessfulOrderPriceCheap,
'Average successful price Neutral': metrics.
averageSuccessfulOrderPriceNeutral,
'Average successful price Asap': metrics.
averageSuccessfulOrderPriceAsap, 'Average successful makespan':
metrics.averageSuccessfulOrderMakeSpan,
'Average successful makespan Cheap': metrics.
averageSuccessfulOrderMakespanCheap,
'Average successful makespan Neutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'Average successful makespan Asap': metrics.
averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':
metrics.percentageOfSuccessfulASAPOrders,
'Successful Neutral Orders': metrics.
percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':
metrics.percentageOfSuccessfulNeutralOrders}})
server.port = 8521
server.launch()
| from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import ChartModule
from mesa.batchrunner import BatchRunner
from agentPortrayal import agent_portrayal
import metrics
from matplotlib import pyplot as plt
from ArchitectureModel import MASArchitecture
import os
import random
import sys
runBatch = True
architecture = 'Inter-Firm'
saveResults = True
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
if runBatch:
fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,
'distributed': True, 'verbose': False, 'searchSize': 1,
'batchRun': True}
variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],
'ordersPerWeek': [1, 5, 20, 40, 80, 120]}
batch_run = BatchRunner(MASArchitecture, variable_params,
fixed_params, iterations=10, max_steps=800, model_reporters={
'Utilisation': metrics.machineUtilisation, 'CompleteOrders':
metrics.ordersComplete, 'AverageOrderWaitTime': metrics.
averageOrderWaitTime, 'TotalMessagesSent': metrics.
totalMessagesSent, 'AverageMessagesSent': metrics.
averageMessagesSent, 'SuccessfulOrders': metrics.
successfulOrders, 'noProposalOrders': metrics.noProposalOrders,
'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':
metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,
'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
'MaxMessagesReceivedFactory': metrics.
maxMessagesReceivedByFactory, 'AverageSatisfactionScore':
metrics.averageSatisfactionScore,
'AverageSuccessfulSatisfactionScore': metrics.
averageSuccessfulSatisfactionScore,
'CheapOrdersWithCheapMachines': metrics.
cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':
metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':
metrics.averageSuccessfulOrderPrice,
'AverageSuccessfulOrderPriceCheap': metrics.
averageSuccessfulOrderPriceCheap,
'AverageSuccessfulOrderPriceNeutral': metrics.
averageSuccessfulOrderPriceNeutral,
'AverageSuccessfulOrderPriceAsap': metrics.
averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':
metrics.averageSuccessfulOrderMakeSpan,
'AverageSuccessfulOrderMakespanCheap': metrics.
averageSuccessfulOrderMakespanCheap,
'AverageSuccessfulOrderMakespanNeutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'AverageSuccessfulOrderMakespanAsap': metrics.
averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':
metrics.percentageOfSuccessfulASAPOrders,
'SuccessfulCheapOrders': metrics.
percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':
metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={
'id': 'unique_id'})
batch_run.run_all()
model_data = batch_run.get_model_vars_dataframe()
agent_data = batch_run.get_agent_vars_dataframe()
if saveResults:
number = 0
while os.path.exists('{}/results/test_{}'.format(dir_path, number)
) == True:
number += 1
os.makedirs('{}/results/test_{}'.format(dir_path, number))
model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format
(dir_path, number))
agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format
(dir_path, number))
else:
grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)
chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],
data_collector_name='datacollector')
chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'
}], data_collector_name='datacollector')
chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':
'Red'}], data_collector_name='datacollector')
chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':
'Red'}], data_collector_name='datacollector')
averageMessagesSentChart = ChartModule([{'Label':
'Average Messages Sent', 'Color': 'Red'}], data_collector_name=
'datacollector')
chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':
'Green'}], data_collector_name='datacollector')
chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':
'Blue'}], data_collector_name='datacollector')
chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],
data_collector_name='datacollector')
chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart10 = ChartModule([{'Label': 'Max Messages Received - Order',
'Color': 'Blue'}], data_collector_name='datacollector')
chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',
'Color': 'Red'}], data_collector_name='datacollector')
chart13 = ChartModule([{'Label': 'Average satisfaction score',
'Color': 'Blue'}], data_collector_name='datacollector')
chart14 = ChartModule([{'Label':
'Average successful satisfaction score', 'Color': 'Blue'}],
data_collector_name='datacollector')
chart15 = ChartModule([{'Label':
'% Cheap orders with cheap machines', 'Color': 'Green'}],
data_collector_name='datacollector')
chart16 = ChartModule([{'Label': '% Asap orders with fast machines',
'Color': 'Green'}], data_collector_name='datacollector')
chart17 = ChartModule([{'Label': 'Average successful price',
'Color': 'Blue'}], data_collector_name='datacollector')
chart18 = ChartModule([{'Label': 'Average successful price Cheap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart19 = ChartModule([{'Label': 'Average successful price Neutral',
'Color': 'Blue'}], data_collector_name='datacollector')
chart20 = ChartModule([{'Label': 'Average successful price Asap',
'Color': 'Blue'}], data_collector_name='datacollector')
chart21 = ChartModule([{'Label': 'Average successful makespan',
'Color': 'Green'}], data_collector_name='datacollector')
chart22 = ChartModule([{'Label':
'Average successful makespan Cheap', 'Color': 'Green'}],
data_collector_name='datacollector')
chart23 = ChartModule([{'Label':
'Average successful makespan Neutral', 'Color': 'Green'}],
data_collector_name='datacollector')
chart24 = ChartModule([{'Label': 'Average successful makespan Asap',
'Color': 'Green'}], data_collector_name='datacollector')
chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
chart26 = ChartModule([{'Label': 'Successful Neutral Orders',
'Color': 'Red'}], data_collector_name='datacollector')
chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':
'Red'}], data_collector_name='datacollector')
noProposalOrdersChart = ChartModule([{'Label':
'Orders that received no proposals', 'Color': 'Red'}],
data_collector_name='datacollector')
server = ModularServer(MASArchitecture, [grid, chart, chart2,
chart3, chart4, averageMessagesSentChart, chart5,
noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,
chart11, chart12, chart13, chart14, chart15, chart16, chart17,
chart18, chart19, chart20, chart21, chart22, chart23, chart24,
chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,
'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':
1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek':
40, 'model_reporters_dict': {'Utilisation': metrics.
machineUtilisation, 'Complete Orders': metrics.ordersComplete,
'Average Order Wait Time': metrics.averageOrderWaitTime,
'Successful Orders': metrics.successfulOrders,
'Total Messages Sent': metrics.totalMessagesSent,
'Average Messages Sent': metrics.averageMessagesSent,
'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.
totalWIPSize, 'Max Messages Sent - Order': metrics.
maxMessagesSentFromOrder, 'Max Messages Received - Order':
metrics.maxMessagesReceivedByOrder,
'Max Messages Sent - Factory': metrics.
maxMessagesSentFromFactory, 'Max Messages Received - Factory':
metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':
metrics.outsourcedOrders, 'Orders that received no proposals':
metrics.noProposalOrders,
'Average successful satisfaction score': metrics.
averageSuccessfulSatisfactionScore,
'Average satisfaction score': metrics.averageSatisfactionScore,
'% Cheap orders with cheap machines': metrics.
cheapOrdersWithCheapMachines,
'% Asap orders with fast machines': metrics.
asapOrdersWithFastMachines, 'Average successful price': metrics
.averageSuccessfulOrderPrice, 'Average successful price Cheap':
metrics.averageSuccessfulOrderPriceCheap,
'Average successful price Neutral': metrics.
averageSuccessfulOrderPriceNeutral,
'Average successful price Asap': metrics.
averageSuccessfulOrderPriceAsap, 'Average successful makespan':
metrics.averageSuccessfulOrderMakeSpan,
'Average successful makespan Cheap': metrics.
averageSuccessfulOrderMakespanCheap,
'Average successful makespan Neutral': metrics.
averageSuccessfulOrderMakespanNeutral,
'Average successful makespan Asap': metrics.
averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':
metrics.percentageOfSuccessfulASAPOrders,
'Successful Neutral Orders': metrics.
percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':
metrics.percentageOfSuccessfulNeutralOrders}})
server.port = 8521
server.launch()
| from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import ChartModule
from mesa.batchrunner import BatchRunner
from agentPortrayal import agent_portrayal
import metrics
from matplotlib import pyplot as plt
from ArchitectureModel import MASArchitecture
import os
import random
import sys
runBatch = True
architecture = 'Inter-Firm'
saveResults = True
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
if(runBatch):
fixed_params = {'width': 60, 'height': 60,'splitSize':1,'distributed':True,'verbose':False,'searchSize':1,'batchRun':True}
variable_params = {'quantity':[10,20,50,80,100,120,150],'ordersPerWeek':[1,5,20,40,80,120]}
batch_run = BatchRunner(
MASArchitecture,
variable_params,
fixed_params,
iterations=10,
max_steps=800,
model_reporters={
"Utilisation": metrics.machineUtilisation,
"CompleteOrders": metrics.ordersComplete,
'AverageOrderWaitTime': metrics.averageOrderWaitTime,
'TotalMessagesSent': metrics.totalMessagesSent,
'AverageMessagesSent': metrics.averageMessagesSent,
"SuccessfulOrders":metrics.successfulOrders,
"noProposalOrders":metrics.noProposalOrders,
'OutsourcedOrders': metrics.outsourcedOrders,
'LateOrders':metrics.lateOrders,
'WIPBacklog':metrics.totalWIPSize,
'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
'MaxMessagesReceivedFactory': metrics.maxMessagesReceivedByFactory,
'AverageSatisfactionScore':metrics.averageSatisfactionScore,
'AverageSuccessfulSatisfactionScore':metrics.averageSuccessfulSatisfactionScore,
'CheapOrdersWithCheapMachines':metrics.cheapOrdersWithCheapMachines,
'AsapOrdersWithFastMachines':metrics.asapOrdersWithFastMachines,
'AverageSuccessfulPrice': metrics.averageSuccessfulOrderPrice,
'AverageSuccessfulOrderPriceCheap':metrics.averageSuccessfulOrderPriceCheap,
'AverageSuccessfulOrderPriceNeutral':metrics.averageSuccessfulOrderPriceNeutral,
'AverageSuccessfulOrderPriceAsap':metrics.averageSuccessfulOrderPriceAsap,
'AverageSuccessfulMakespan': metrics.averageSuccessfulOrderMakeSpan,
'AverageSuccessfulOrderMakespanCheap':metrics.averageSuccessfulOrderMakespanCheap,
'AverageSuccessfulOrderMakespanNeutral':metrics.averageSuccessfulOrderMakespanNeutral,
'AverageSuccessfulOrderMakespanAsap':metrics.averageSuccessfulOrderMakespanAsap,
'SuccessfulAsapOrders':metrics.percentageOfSuccessfulASAPOrders,
'SuccessfulCheapOrders':metrics.percentageOfSuccessfulCheapOrders,
'SuccessfulNeutralOrders':metrics.percentageOfSuccessfulNeutralOrders
},
agent_reporters={
'id':'unique_id',
# # TODO: add in other agent reports that you would like to use
}
)
batch_run.run_all()
model_data = batch_run.get_model_vars_dataframe()
agent_data = batch_run.get_agent_vars_dataframe()
# Save results
if(saveResults):
number = 0
### CHANGE PATH TO WHERE YOU WANT RESULTS TO BE SAVED
while (os.path.exists('{}/results/test_{}'.format(dir_path,number)) == True):
number += 1
# TODO: maybe make a text file that describes the test that has been run
os.makedirs(
'{}/results/test_{}'.format(dir_path,number))
model_data.to_pickle(
'{}/results/test_{}/model_data.pkl'.format(dir_path,number))
agent_data.to_pickle(
'{}/results/test_{}/agent_data.pkl'.format(dir_path,number))
else:
# TODO: rename all of these
grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)
chart = ChartModule([{'Label': 'Utilisation', "Color": 'Black'}],data_collector_name='datacollector')
chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'}], data_collector_name='datacollector')
chart3 = ChartModule([{'Label': 'Average Order Wait Time','Color': 'Red'}], data_collector_name='datacollector')
chart4 = ChartModule([{'Label': 'Total Messages Sent','Color': 'Red'}], data_collector_name='datacollector')
averageMessagesSentChart = ChartModule([{'Label': 'Average Messages Sent','Color': 'Red'}], data_collector_name='datacollector')
chart5 = ChartModule([{'Label': 'Successful Orders','Color': 'Green'}], data_collector_name='datacollector')
chart6 = ChartModule([{'Label': 'Outsourced Orders','Color': 'Blue'}], data_collector_name='datacollector')
chart7 = ChartModule([{'Label': 'Late Orders','Color': 'Red'}], data_collector_name='datacollector')
chart8 = ChartModule([{'Label': 'WIP Backlog','Color': 'Blue'}], data_collector_name='datacollector')
chart9 = ChartModule([{'Label': 'Max Messages Sent - Order','Color': 'Blue'}], data_collector_name='datacollector')
chart10 = ChartModule([{'Label': 'Max Messages Received - Order','Color': 'Blue'}], data_collector_name='datacollector')
chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory','Color': 'Red'}], data_collector_name='datacollector')
chart12 = ChartModule([{'Label': 'Max Messages Received - Factory','Color': 'Red'}], data_collector_name='datacollector')
chart13 = ChartModule([{'Label': 'Average satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')
chart14 = ChartModule([{'Label': 'Average successful satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')
chart15 = ChartModule([{'Label': '% Cheap orders with cheap machines','Color': 'Green'}], data_collector_name='datacollector')
chart16 = ChartModule([{'Label': '% Asap orders with fast machines','Color': 'Green'}], data_collector_name='datacollector')
chart17 = ChartModule([{'Label': 'Average successful price','Color': 'Blue'}], data_collector_name='datacollector')
chart18 = ChartModule([{'Label': 'Average successful price Cheap','Color': 'Blue'}], data_collector_name='datacollector')
chart19 = ChartModule([{'Label': 'Average successful price Neutral','Color': 'Blue'}], data_collector_name='datacollector')
chart20 = ChartModule([{'Label': 'Average successful price Asap','Color': 'Blue'}], data_collector_name='datacollector')
chart21 = ChartModule([{'Label': 'Average successful makespan','Color': 'Green'}], data_collector_name='datacollector')
chart22 = ChartModule([{'Label': 'Average successful makespan Cheap','Color': 'Green'}], data_collector_name='datacollector')
chart23 = ChartModule([{'Label': 'Average successful makespan Neutral','Color': 'Green'}], data_collector_name='datacollector')
chart24 = ChartModule([{'Label': 'Average successful makespan Asap','Color': 'Green'}], data_collector_name='datacollector')
chart25 = ChartModule([{'Label': 'Successful Cheap Orders','Color': 'Red'}], data_collector_name='datacollector')
chart26 = ChartModule([{'Label': 'Successful Neutral Orders','Color': 'Red'}], data_collector_name='datacollector')
chart27 = ChartModule([{'Label': 'Successful Asap Orders','Color': 'Red'}], data_collector_name='datacollector')
noProposalOrdersChart = ChartModule([{'Label': 'Orders that received no proposals','Color': 'Red'}], data_collector_name='datacollector')
server = ModularServer(MASArchitecture,
[grid,
chart,
chart2,
chart3,
chart4,
averageMessagesSentChart,
chart5,
noProposalOrdersChart,
chart6,
chart7,
chart8, chart9, chart10,chart11, chart12,
chart13,chart14,
chart15,
chart16,chart17,
chart18, chart19, chart20,chart21,chart22,chart23,chart24,chart25,chart26,chart27
],
'Festo-Fetch.ai',
{'width': 60, 'height': 60, 'distributed':True,'quantity':10,'splitSize':1,'newOrderProbability':5,'verbose':True,'ordersPerWeek':40,
'model_reporters_dict': {
"Utilisation": metrics.machineUtilisation,
"Complete Orders": metrics.ordersComplete,
'Average Order Wait Time': metrics.averageOrderWaitTime,
"Successful Orders":metrics.successfulOrders,
'Total Messages Sent': metrics.totalMessagesSent,
'Average Messages Sent': metrics.averageMessagesSent,
'Late Orders':metrics.lateOrders,
'WIP Backlog':metrics.totalWIPSize,
'Max Messages Sent - Order': metrics.maxMessagesSentFromOrder,
'Max Messages Received - Order': metrics.maxMessagesReceivedByOrder,
'Max Messages Sent - Factory': metrics.maxMessagesSentFromFactory,
'Max Messages Received - Factory': metrics.maxMessagesReceivedByFactory,
'Outsourced Orders': metrics.outsourcedOrders,
'Orders that received no proposals':metrics.noProposalOrders,
'Average successful satisfaction score':metrics.averageSuccessfulSatisfactionScore,
'Average satisfaction score':metrics.averageSatisfactionScore,
'% Cheap orders with cheap machines':metrics.cheapOrdersWithCheapMachines,
'% Asap orders with fast machines':metrics.asapOrdersWithFastMachines,
'Average successful price': metrics.averageSuccessfulOrderPrice,
'Average successful price Cheap':metrics.averageSuccessfulOrderPriceCheap,
'Average successful price Neutral':metrics.averageSuccessfulOrderPriceNeutral,
'Average successful price Asap':metrics.averageSuccessfulOrderPriceAsap,
'Average successful makespan': metrics.averageSuccessfulOrderMakeSpan,
'Average successful makespan Cheap':metrics.averageSuccessfulOrderMakespanCheap,
'Average successful makespan Neutral':metrics.averageSuccessfulOrderMakespanNeutral,
'Average successful makespan Asap':metrics.averageSuccessfulOrderMakespanAsap,
'Successful Cheap Orders':metrics.percentageOfSuccessfulASAPOrders,
'Successful Neutral Orders':metrics.percentageOfSuccessfulCheapOrders,
'Successful Asap Orders':metrics.percentageOfSuccessfulNeutralOrders
}})
server.port = 8521
server.launch() | [
0,
1,
2,
3,
4
] |
1,083 | f5dffa3c22bb35ed07cb5ca28f2ba02ea3c07dda | <mask token>
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render('Score: ' + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /
2 + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height(
):
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
<mask token>
| <mask token>
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render('Score: ' + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /
2 + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height(
):
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
def reset():
global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
num_enemies = 2
enemy_X = []
enemy_Y = []
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(2)
player_X = 370
player_Y = 480
score = 0
bullet_fired = []
bullet_fired.append(False)
gamespeed = 0
num_bullet = 1
bullet_X = []
bullet_X.append(0)
bullet_Y = []
bullet_Y.append(player_Y)
<mask token>
| <mask token>
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption('space invaders')
background = pygame.image.load('background.png')
score = 0
previous_score = 0
score_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)
textX = 10
testY = 10
intro = True
intro_text = 'SpaceInvaders'
intro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
intro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
play_button = pygame.image.load('play-button.png')
play_button_X = SCREEN_WIDTH / 2 - play_button.get_width()
play_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()
gameover = False
gameover_text = 'Game Over'
replay_button = pygame.image.load('replay.png')
player_image = pygame.image.load('spaceship.png')
player_X = 370
player_Y = 480
player_movement = 0
bullet_image = pygame.image.load('hot.png')
bullet_X = []
bullet_Y = []
bullet_movement = 0.7
bullet_fired = []
num_bullet = 1
for i in range(num_bullet):
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
enemy_image = pygame.image.load('ufo.png')
enemy_X = []
enemy_Y = []
enemy_X_movement = []
enemy_Y_movement = 40
num_enemies = 2
gamespeed = 0
gamespeed_increment = 0.05
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render('Score: ' + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /
2 + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height(
):
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
def reset():
global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
num_enemies = 2
enemy_X = []
enemy_Y = []
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(2)
player_X = 370
player_Y = 480
score = 0
bullet_fired = []
bullet_fired.append(False)
gamespeed = 0
num_bullet = 1
bullet_X = []
bullet_X.append(0)
bullet_Y = []
bullet_Y.append(player_Y)
running = True
while running:
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
dt = clock.tick(60)
while intro:
show_intro()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
intro = False
pygame.display.update()
while gameover:
game_over_screen()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
reset()
gameover = False
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_movement = -0.2 - gamespeed
if event.key == pygame.K_RIGHT:
player_movement = 0.2 + gamespeed
if event.key == pygame.K_SPACE:
for i in range(num_bullet):
if not bullet_fired[i]:
bullet_X[i] = player_X
fire_bullet(bullet_X[i], bullet_Y[i], i)
break
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_movement = 0
player_X += player_movement * dt
if player_X <= 1:
player_X = 1
elif player_X >= 735:
player_X = 735
for i in range(num_bullet):
if bullet_Y[i] <= 1:
reset_bullet(i)
if bullet_fired[i]:
bullet_Y[i] -= bullet_movement * dt
fire_bullet(bullet_X[i], bullet_Y[i], i)
for i in range(num_enemies):
if enemy_Y[i] >= 440:
gameover = True
for j in range(num_bullet):
if bullet_fired[j]:
collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],
bullet_Y[j])
if collision:
reset_enemy(i)
reset_bullet(j)
score += 1
if score != 0 and previous_score != score:
if score % 3 == 0:
add_enemy()
print('added enemy')
if score % 10 == 0:
gamespeed += gamespeed_increment
print('increased gamespeed')
if score % 20 == 0:
add_bullet()
print('added bullet')
previous_score = score
if enemy_X_movement[i] < 0:
enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt
else:
enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt
if enemy_X[i] <= 1:
enemy_X[i] = 2
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += enemy_Y_movement + gamespeed
elif enemy_X[i] >= 735:
enemy_X[i] = 734
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += enemy_Y_movement + gamespeed
spawn_enemy(enemy_X[i], enemy_Y[i])
player(player_X, player_Y)
show_score()
pygame.display.update()
| import math
import random
import pygame
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption('space invaders')
background = pygame.image.load('background.png')
score = 0
previous_score = 0
score_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)
textX = 10
testY = 10
intro = True
intro_text = 'SpaceInvaders'
intro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
intro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
play_button = pygame.image.load('play-button.png')
play_button_X = SCREEN_WIDTH / 2 - play_button.get_width()
play_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()
gameover = False
gameover_text = 'Game Over'
replay_button = pygame.image.load('replay.png')
player_image = pygame.image.load('spaceship.png')
player_X = 370
player_Y = 480
player_movement = 0
bullet_image = pygame.image.load('hot.png')
bullet_X = []
bullet_Y = []
bullet_movement = 0.7
bullet_fired = []
num_bullet = 1
for i in range(num_bullet):
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
enemy_image = pygame.image.load('ufo.png')
enemy_X = []
enemy_Y = []
enemy_X_movement = []
enemy_Y_movement = 40
num_enemies = 2
gamespeed = 0
gamespeed_increment = 0.05
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render('Score: ' + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /
2 + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height(
):
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
def reset():
global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
num_enemies = 2
enemy_X = []
enemy_Y = []
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(2)
player_X = 370
player_Y = 480
score = 0
bullet_fired = []
bullet_fired.append(False)
gamespeed = 0
num_bullet = 1
bullet_X = []
bullet_X.append(0)
bullet_Y = []
bullet_Y.append(player_Y)
running = True
while running:
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
dt = clock.tick(60)
while intro:
show_intro()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
intro = False
pygame.display.update()
while gameover:
game_over_screen()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
reset()
gameover = False
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_movement = -0.2 - gamespeed
if event.key == pygame.K_RIGHT:
player_movement = 0.2 + gamespeed
if event.key == pygame.K_SPACE:
for i in range(num_bullet):
if not bullet_fired[i]:
bullet_X[i] = player_X
fire_bullet(bullet_X[i], bullet_Y[i], i)
break
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_movement = 0
player_X += player_movement * dt
if player_X <= 1:
player_X = 1
elif player_X >= 735:
player_X = 735
for i in range(num_bullet):
if bullet_Y[i] <= 1:
reset_bullet(i)
if bullet_fired[i]:
bullet_Y[i] -= bullet_movement * dt
fire_bullet(bullet_X[i], bullet_Y[i], i)
for i in range(num_enemies):
if enemy_Y[i] >= 440:
gameover = True
for j in range(num_bullet):
if bullet_fired[j]:
collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],
bullet_Y[j])
if collision:
reset_enemy(i)
reset_bullet(j)
score += 1
if score != 0 and previous_score != score:
if score % 3 == 0:
add_enemy()
print('added enemy')
if score % 10 == 0:
gamespeed += gamespeed_increment
print('increased gamespeed')
if score % 20 == 0:
add_bullet()
print('added bullet')
previous_score = score
if enemy_X_movement[i] < 0:
enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt
else:
enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt
if enemy_X[i] <= 1:
enemy_X[i] = 2
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += enemy_Y_movement + gamespeed
elif enemy_X[i] >= 735:
enemy_X[i] = 734
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += enemy_Y_movement + gamespeed
spawn_enemy(enemy_X[i], enemy_Y[i])
player(player_X, player_Y)
show_score()
pygame.display.update()
| import math
import random
import pygame
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption('space invaders')
background = pygame.image.load('background.png')
score = 0
previous_score = 0
score_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)
textX = 10
testY = 10
# intro
intro = True
intro_text = "SpaceInvaders"
intro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
intro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
# PlayButton
play_button = pygame.image.load('play-button.png')
play_button_X = (SCREEN_WIDTH / 2) - play_button.get_width()
play_button_Y = (SCREEN_HEIGHT / (4 / 3)) - play_button.get_height()
# GameOver
gameover = False
gameover_text = "Game Over"
replay_button = pygame.image.load('replay.png')
# player
player_image = pygame.image.load('spaceship.png')
player_X = 370
player_Y = 480
player_movement = 0
# bullet
bullet_image = pygame.image.load('hot.png')
bullet_X = []
bullet_Y = []
bullet_movement = 0.7
bullet_fired = []
num_bullet = 1
for i in range(num_bullet):
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
# enemy
enemy_image = pygame.image.load('ufo.png')
enemy_X = []
enemy_Y = []
enemy_X_movement = []
enemy_Y_movement = 40
num_enemies = 2
# gamespeedincrement
gamespeed = 0
gamespeed_increment = 0.05
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
def player(x, y):
screen.blit(player_image, (x, y))
def fire_bullet(x, y, n):
global bullet_fired
bullet_fired[n] = True
screen.blit(bullet_image, (x + 16, y + 10))
def add_bullet():
global num_bullet
num_bullet += 1
bullet_X.append(0)
bullet_Y.append(player_Y)
bullet_fired.append(False)
def spawn_enemy(x, y):
screen.blit(enemy_image, (x, y))
def add_enemy():
global num_enemies
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(0.2)
num_enemies += 1
def reset_enemy(index):
enemy_X[index] = random.randint(0, 736)
enemy_Y[index] = random.randint(50, 150)
enemy_X_movement[index] = 0.2
def reset_bullet(n):
global bullet_fired, bullet_Y
bullet_fired[n] = False
bullet_Y[n] = player_Y
def isCollion(eX, eY, bX, bY):
distance = math.sqrt(math.pow(eX - bX, 2) + (math.pow(eY - bY, 2)))
if distance < 27:
return True
else:
return False
def show_score():
text = score_font.render("Score: " + str(score), True, (255, 255, 255))
screen.blit(text, (textX, testY))
def show_intro():
show_big_text(intro_text)
show_play_button()
def show_big_text(s):
text = intro_font.render(s, True, (89, 203, 255))
text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
screen.blit(text, text_rect)
text2 = intro_font2.render(s, True, (250, 50, 183))
text_rect2 = text.get_rect(center=((SCREEN_WIDTH / 2) + 3, (SCREEN_HEIGHT / 2) + 3))
screen.blit(text2, text_rect2)
def show_play_button():
screen.blit(play_button, (play_button_X, play_button_Y))
def show_replay_button():
screen.blit(replay_button, (play_button_X, play_button_Y))
def play_button_clicked():
click = pygame.mouse.get_pressed()
if click[0] == 1:
pos = pygame.mouse.get_pos()
if play_button_X < pos[0] < play_button_X + play_button.get_width():
if play_button_Y < pos[1] < play_button_Y + play_button.get_height():
return True
return False
def game_over_screen():
show_big_text(gameover_text)
show_score()
show_replay_button()
def reset():
global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
num_enemies = 2
enemy_X = []
enemy_Y = []
for i in range(num_enemies):
enemy_X.append(random.randint(0, 736))
enemy_Y.append(random.randint(50, 150))
enemy_X_movement.append(2)
player_X = 370
player_Y = 480
score = 0
bullet_fired = []
bullet_fired.append(False)
gamespeed = 0
num_bullet = 1
bullet_X = []
bullet_X.append(0)
bullet_Y = []
bullet_Y.append(player_Y)
running = True
while running:
screen.fill((0, 0, 0))
screen.blit(background, (0, 0))
dt = clock.tick(60)
while intro:
show_intro()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
intro = False
pygame.display.update()
while gameover:
game_over_screen()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if play_button_clicked():
reset()
gameover = False
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_movement = -0.2 - gamespeed
if event.key == pygame.K_RIGHT:
player_movement = 0.2 + gamespeed
if event.key == pygame.K_SPACE:
for i in range(num_bullet):
if not bullet_fired[i]:
bullet_X[i] = player_X
fire_bullet(bullet_X[i], bullet_Y[i], i)
break
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
player_movement = 0
# playermovement
player_X += player_movement * dt
if player_X <= 1:
player_X = 1
elif player_X >= 735:
player_X = 735
# bulletmovement
for i in range(num_bullet):
if bullet_Y[i] <= 1:
reset_bullet(i)
if bullet_fired[i]:
bullet_Y[i] -= bullet_movement * dt
fire_bullet(bullet_X[i], bullet_Y[i], i)
# enemy_movement
for i in range(num_enemies):
if enemy_Y[i] >= 440:
gameover = True
for j in range(num_bullet):
if bullet_fired[j]:
collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j], bullet_Y[j])
if collision:
reset_enemy(i)
reset_bullet(j)
score += 1
if score != 0 and previous_score != score:
if score % 3 == 0:
add_enemy()
print("added enemy")
if score % 10 == 0:
gamespeed += gamespeed_increment
print("increased gamespeed")
if score % 20 == 0:
add_bullet()
print("added bullet")
previous_score = score
if enemy_X_movement[i] < 0:
enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt
else:
enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt
if enemy_X[i] <= 1:
enemy_X[i] = 2
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += (enemy_Y_movement + gamespeed)
elif enemy_X[i] >= 735:
enemy_X[i] = 734
enemy_X_movement[i] = -enemy_X_movement[i]
enemy_Y[i] += (enemy_Y_movement + gamespeed)
spawn_enemy(enemy_X[i], enemy_Y[i])
player(player_X, player_Y)
show_score()
pygame.display.update()
| [
15,
16,
18,
19,
20
] |
1,084 | e2e4adaa8f7f62662e0c2915faff1bed72986351 | <mask token>
| <mask token>
admin.site.register(Hash)
| from django.contrib import admin
from .models import Hash
admin.site.register(Hash)
| null | null | [
0,
1,
2
] |
1,085 | d8af43d24a2f2b99bc8b5098f251e017852d6d86 | <mask token>
class BaseExecution:
<mask token>
<mask token>
| <mask token>
class BaseExecution:
def __init__(self, flag, parser):
self.flag = flag
self.parser = parser
<mask token>
| <mask token>
class BaseExecution:
def __init__(self, flag, parser):
self.flag = flag
self.parser = parser
def execute(self):
process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=
subprocess.PIPE, stderr=subprocess.PIPE)
output, err = process.communicate()
return_code = process.returncode
parser = self.parser(output, err, return_code)
result = parser.parse()
return result
| import subprocess
class BaseExecution:
def __init__(self, flag, parser):
self.flag = flag
self.parser = parser
def execute(self):
process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=
subprocess.PIPE, stderr=subprocess.PIPE)
output, err = process.communicate()
return_code = process.returncode
parser = self.parser(output, err, return_code)
result = parser.parse()
return result
| null | [
1,
2,
3,
4
] |
1,086 | 2f2030107f3a23c0d2f404a838eaccc8b35ac410 | <mask token>
| <mask token>
print('From fahrenheit to celcius', celcius)
| fahrenheit = float(input('Enter a fahrenheit degree: '))
celcius = (fahrenheit - 32) * 5 / 9
print('From fahrenheit to celcius', celcius)
| fahrenheit = float(input("Enter a fahrenheit degree: "))
celcius = ((fahrenheit - 32) * 5) / 9
print("From fahrenheit to celcius", celcius) | null | [
0,
1,
2,
3
] |
1,087 | c4fbf206482a04f3e2d2aa98a0dbf525a176c4e7 | <mask token>
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')
self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] ==
'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] ==
'Kielbasa, Pepper, Onion and Potato Hash')
<mask token>
<mask token>
<mask token>
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
grocery_list = []
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\' + recipe + '.txt')
grocery_list = grocery_functions.condense_grocery_list(grocery_list)
<mask token>
<mask token>
<mask token>
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.
get_all_ingredients('test-recipes'))
<mask token>
<mask token>
| <mask token>
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')
self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] ==
'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] ==
'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
list = grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt')
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
list = grocery_functions.get_tags_from_recipe_file(
'test-recipes\\Chicken Curry in a Hurry.txt')
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
list = grocery_functions.get_recipe_from_recipe_file(
'test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt')
self.assertTrue(list[2] ==
'1 cup bell pepper, chopped (any colors you like)')
self.assertTrue(list[10] == '1 teaspoon italian seasoning')
self.assertTrue(list[15] ==
'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '
)
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
grocery_list = []
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\' + recipe + '.txt')
grocery_list = grocery_functions.condense_grocery_list(grocery_list)
<mask token>
<mask token>
<mask token>
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.
get_all_ingredients('test-recipes'))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
<mask token>
| <mask token>
sys.path.insert(0, '../src/')
<mask token>
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')
self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] ==
'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] ==
'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
list = grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt')
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
list = grocery_functions.get_tags_from_recipe_file(
'test-recipes\\Chicken Curry in a Hurry.txt')
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
list = grocery_functions.get_recipe_from_recipe_file(
'test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt')
self.assertTrue(list[2] ==
'1 cup bell pepper, chopped (any colors you like)')
self.assertTrue(list[10] == '1 teaspoon italian seasoning')
self.assertTrue(list[15] ==
'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '
)
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
grocery_list = []
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\' + recipe + '.txt')
grocery_list = grocery_functions.condense_grocery_list(grocery_list)
def test_makeAllIngredientsFile(self):
grocery_functions.make_all_ingredients_file()
def test_getItemDeptDicts(self):
grocery_functions.get_item_dept_dicts(
'Smiths-Eu-JT-ItemDepartments.txt')
def test_checkRecipeFormat(self):
errors = grocery_functions.check_recipe_format('test-recipes', False)
self.assertTrue(errors == [])
errors = grocery_functions.check_recipe_format('broken-test-recipes',
False)
self.assertTrue(
'invalid format, "1 lb, chicken breasts" in: broken-test-recipes//broken_recipe.txt'
in errors)
self.assertTrue(
'invalid heading, "wrong_header" in file: broken-test-recipes//broken_recipe.txt'
in errors)
self.assertTrue(
'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors
)
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.
get_all_ingredients('test-recipes'))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
unittest.TextTestRunner(verbosity=2).run(suite)
| __author__ = 'Joe'
import sys
sys.path.insert(0, '../src/')
import grocery_functions
import unittest
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')
self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] ==
'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] ==
'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
list = grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt')
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
list = grocery_functions.get_tags_from_recipe_file(
'test-recipes\\Chicken Curry in a Hurry.txt')
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
list = grocery_functions.get_recipe_from_recipe_file(
'test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt')
self.assertTrue(list[2] ==
'1 cup bell pepper, chopped (any colors you like)')
self.assertTrue(list[10] == '1 teaspoon italian seasoning')
self.assertTrue(list[15] ==
'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '
)
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names('test-recipes')
grocery_list = []
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file(
'test-recipes\\' + recipe + '.txt')
grocery_list = grocery_functions.condense_grocery_list(grocery_list)
def test_makeAllIngredientsFile(self):
grocery_functions.make_all_ingredients_file()
def test_getItemDeptDicts(self):
grocery_functions.get_item_dept_dicts(
'Smiths-Eu-JT-ItemDepartments.txt')
def test_checkRecipeFormat(self):
errors = grocery_functions.check_recipe_format('test-recipes', False)
self.assertTrue(errors == [])
errors = grocery_functions.check_recipe_format('broken-test-recipes',
False)
self.assertTrue(
'invalid format, "1 lb, chicken breasts" in: broken-test-recipes//broken_recipe.txt'
in errors)
self.assertTrue(
'invalid heading, "wrong_header" in file: broken-test-recipes//broken_recipe.txt'
in errors)
self.assertTrue(
'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors
)
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.
get_all_ingredients('test-recipes'))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
unittest.TextTestRunner(verbosity=2).run(suite)
| __author__ = 'Joe'
import sys
sys.path.insert(0,'../src/')
import grocery_functions
import unittest
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
self.assertTrue(recipe_names[0] == "Cajun Chicken & Rice")
self.assertTrue(recipe_names[1] == "Chicken Curry in a Hurry")
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] == 'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] == 'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
list=grocery_functions.get_ingredients_from_recipe_file("test-recipes\Kielbasa, Pepper, Onion and Potato Hash.txt")
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
list=grocery_functions.get_tags_from_recipe_file("test-recipes\Chicken Curry in a Hurry.txt")
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
list=grocery_functions.get_recipe_from_recipe_file("test-recipes\Healthy Roasted Chicken and Veggies (one pan).txt")
self.assertTrue(list[2]=="1 cup bell pepper, chopped (any colors you like)")
self.assertTrue(list[10]=="1 teaspoon italian seasoning")
self.assertTrue(list[15]=="Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, ")
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
grocery_list=[]
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file("test-recipes\\"+recipe+".txt")
grocery_list=grocery_functions.condense_grocery_list(grocery_list)
# grocery_functions.print_grocery_list(grocery_list)
# grocery_functions.sort_and_print_grocery_List(grocery_list, "Smiths-Eu-JT-ItemDepartments.txt")
def test_makeAllIngredientsFile(self):
grocery_functions.make_all_ingredients_file()
def test_getItemDeptDicts(self):
grocery_functions.get_item_dept_dicts("Smiths-Eu-JT-ItemDepartments.txt")
def test_checkRecipeFormat(self):
errors=grocery_functions.check_recipe_format("test-recipes", False)
self.assertTrue(errors == [])
errors=grocery_functions.check_recipe_format("broken-test-recipes", False)
self.assertTrue('invalid format, "1 lb, chicken breasts" in: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('invalid heading, "wrong_header" in file: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors)
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.get_all_ingredients("test-recipes"))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
unittest.TextTestRunner(verbosity=2).run(suite) | [
4,
8,
12,
14,
15
] |
1,088 | a7db627c49b53cd3a073d866a0373336a46b4053 | <mask token>
| <mask token>
dot.edge('BaseException', 'SystemExit')
dot.edge('BaseException', 'KeyboardInterrupt')
dot.edge('BaseException', 'GeneratorExit')
dot.edge('BaseException', 'Exception')
dot.edge('Exception', 'StopIteration')
dot.edge('Exception', 'StopAsyncIteration')
dot.edge('Exception', 'ArithmeticError')
dot.edge('ArithmeticError', 'FloatingPointError')
dot.edge('ArithmeticError', 'OverflowError')
dot.edge('ArithmeticError', 'ZeroDivisionError')
dot.edge('Exception', 'AssertionError')
dot.edge('Exception', 'AttributeError')
dot.edge('Exception', 'BufferError')
dot.edge('Exception', 'EOFError')
dot.edge('Exception', 'ImportError')
dot.edge('ImportError', 'ModuleNotFoundError')
dot.edge('Exception', 'LookupError')
dot.edge('LookupError', 'IndexError')
dot.edge('LookupError', 'KeyError')
dot.edge('Exception', 'MemoryError')
dot.edge('Exception', 'NameError')
dot.edge('NameError', 'UnboundLocalError')
dot.edge('Exception', 'OSError')
dot.edge('OSError', 'BlockingIOError')
dot.edge('OSError', 'ChildProcessError')
dot.edge('OSError', 'ConnectionError')
dot.edge('ConnectionError', 'BrokenPipeError')
dot.edge('ConnectionError', 'ConnectionAbortedError')
dot.edge('ConnectionError', 'ConnectionRefusedError')
dot.edge('ConnectionError', 'ConnectionResetError')
dot.edge('OSError', 'FileExistsError')
dot.edge('OSError', 'FileNotFoundError')
dot.edge('OSError', 'InterruptedError')
dot.edge('OSError', 'IsADirectoryError')
dot.edge('OSError', 'NotADirectoryError')
dot.edge('OSError', 'PermissionError')
dot.edge('OSError', 'ProcessLookupError')
dot.edge('OSError', 'TimeoutError')
dot.edge('Exception', 'ReferenceError')
dot.edge('Exception', 'RuntimeError')
dot.edge('RuntimeError', 'NotImplementedError')
dot.edge('RuntimeError', 'RecursionError')
dot.edge('Exception', 'SyntaxError')
dot.edge('SyntaxError', 'IndentationError')
dot.edge('SyntaxError', 'TabError')
dot.edge('Exception', 'SystemError')
dot.edge('Exception', 'TypeError')
dot.edge('Exception', 'ValueError')
dot.edge('ValueError', 'UnicodeError')
dot.edge('UnicodeError', 'UnicodeDecodeError')
dot.edge('UnicodeError', 'UnicodeEncodeError')
dot.edge('UnicodeError', 'UnicodeTranslateError')
<mask token>
with open('exceptions.dot', 'w') as dot_file:
dot_file.write(dot_source)
| <mask token>
dot = Digraph()
dot.edge('BaseException', 'SystemExit')
dot.edge('BaseException', 'KeyboardInterrupt')
dot.edge('BaseException', 'GeneratorExit')
dot.edge('BaseException', 'Exception')
dot.edge('Exception', 'StopIteration')
dot.edge('Exception', 'StopAsyncIteration')
dot.edge('Exception', 'ArithmeticError')
dot.edge('ArithmeticError', 'FloatingPointError')
dot.edge('ArithmeticError', 'OverflowError')
dot.edge('ArithmeticError', 'ZeroDivisionError')
dot.edge('Exception', 'AssertionError')
dot.edge('Exception', 'AttributeError')
dot.edge('Exception', 'BufferError')
dot.edge('Exception', 'EOFError')
dot.edge('Exception', 'ImportError')
dot.edge('ImportError', 'ModuleNotFoundError')
dot.edge('Exception', 'LookupError')
dot.edge('LookupError', 'IndexError')
dot.edge('LookupError', 'KeyError')
dot.edge('Exception', 'MemoryError')
dot.edge('Exception', 'NameError')
dot.edge('NameError', 'UnboundLocalError')
dot.edge('Exception', 'OSError')
dot.edge('OSError', 'BlockingIOError')
dot.edge('OSError', 'ChildProcessError')
dot.edge('OSError', 'ConnectionError')
dot.edge('ConnectionError', 'BrokenPipeError')
dot.edge('ConnectionError', 'ConnectionAbortedError')
dot.edge('ConnectionError', 'ConnectionRefusedError')
dot.edge('ConnectionError', 'ConnectionResetError')
dot.edge('OSError', 'FileExistsError')
dot.edge('OSError', 'FileNotFoundError')
dot.edge('OSError', 'InterruptedError')
dot.edge('OSError', 'IsADirectoryError')
dot.edge('OSError', 'NotADirectoryError')
dot.edge('OSError', 'PermissionError')
dot.edge('OSError', 'ProcessLookupError')
dot.edge('OSError', 'TimeoutError')
dot.edge('Exception', 'ReferenceError')
dot.edge('Exception', 'RuntimeError')
dot.edge('RuntimeError', 'NotImplementedError')
dot.edge('RuntimeError', 'RecursionError')
dot.edge('Exception', 'SyntaxError')
dot.edge('SyntaxError', 'IndentationError')
dot.edge('SyntaxError', 'TabError')
dot.edge('Exception', 'SystemError')
dot.edge('Exception', 'TypeError')
dot.edge('Exception', 'ValueError')
dot.edge('ValueError', 'UnicodeError')
dot.edge('UnicodeError', 'UnicodeDecodeError')
dot.edge('UnicodeError', 'UnicodeEncodeError')
dot.edge('UnicodeError', 'UnicodeTranslateError')
dot_source = dot.source
with open('exceptions.dot', 'w') as dot_file:
dot_file.write(dot_source)
| from graphviz import Digraph
dot = Digraph()
dot.edge('BaseException', 'SystemExit')
dot.edge('BaseException', 'KeyboardInterrupt')
dot.edge('BaseException', 'GeneratorExit')
dot.edge('BaseException', 'Exception')
dot.edge('Exception', 'StopIteration')
dot.edge('Exception', 'StopAsyncIteration')
dot.edge('Exception', 'ArithmeticError')
dot.edge('ArithmeticError', 'FloatingPointError')
dot.edge('ArithmeticError', 'OverflowError')
dot.edge('ArithmeticError', 'ZeroDivisionError')
dot.edge('Exception', 'AssertionError')
dot.edge('Exception', 'AttributeError')
dot.edge('Exception', 'BufferError')
dot.edge('Exception', 'EOFError')
dot.edge('Exception', 'ImportError')
dot.edge('ImportError', 'ModuleNotFoundError')
dot.edge('Exception', 'LookupError')
dot.edge('LookupError', 'IndexError')
dot.edge('LookupError', 'KeyError')
dot.edge('Exception', 'MemoryError')
dot.edge('Exception', 'NameError')
dot.edge('NameError', 'UnboundLocalError')
dot.edge('Exception', 'OSError')
dot.edge('OSError', 'BlockingIOError')
dot.edge('OSError', 'ChildProcessError')
dot.edge('OSError', 'ConnectionError')
dot.edge('ConnectionError', 'BrokenPipeError')
dot.edge('ConnectionError', 'ConnectionAbortedError')
dot.edge('ConnectionError', 'ConnectionRefusedError')
dot.edge('ConnectionError', 'ConnectionResetError')
dot.edge('OSError', 'FileExistsError')
dot.edge('OSError', 'FileNotFoundError')
dot.edge('OSError', 'InterruptedError')
dot.edge('OSError', 'IsADirectoryError')
dot.edge('OSError', 'NotADirectoryError')
dot.edge('OSError', 'PermissionError')
dot.edge('OSError', 'ProcessLookupError')
dot.edge('OSError', 'TimeoutError')
dot.edge('Exception', 'ReferenceError')
dot.edge('Exception', 'RuntimeError')
dot.edge('RuntimeError', 'NotImplementedError')
dot.edge('RuntimeError', 'RecursionError')
dot.edge('Exception', 'SyntaxError')
dot.edge('SyntaxError', 'IndentationError')
dot.edge('SyntaxError', 'TabError')
dot.edge('Exception', 'SystemError')
dot.edge('Exception', 'TypeError')
dot.edge('Exception', 'ValueError')
dot.edge('ValueError', 'UnicodeError')
dot.edge('UnicodeError', 'UnicodeDecodeError')
dot.edge('UnicodeError', 'UnicodeEncodeError')
dot.edge('UnicodeError', 'UnicodeTranslateError')
dot_source = dot.source
with open('exceptions.dot', 'w') as dot_file:
dot_file.write(dot_source)
| from graphviz import Digraph
dot = Digraph()
dot.edge("BaseException", "SystemExit")
dot.edge("BaseException", "KeyboardInterrupt")
dot.edge("BaseException", "GeneratorExit")
dot.edge("BaseException", "Exception")
dot.edge("Exception", "StopIteration")
dot.edge("Exception", "StopAsyncIteration")
dot.edge("Exception", "ArithmeticError")
dot.edge("ArithmeticError", "FloatingPointError")
dot.edge("ArithmeticError", "OverflowError")
dot.edge("ArithmeticError", "ZeroDivisionError")
dot.edge("Exception", "AssertionError")
dot.edge("Exception", "AttributeError")
dot.edge("Exception", "BufferError")
dot.edge("Exception", "EOFError")
dot.edge("Exception", "ImportError")
dot.edge("ImportError", "ModuleNotFoundError")
dot.edge("Exception", "LookupError")
dot.edge("LookupError", "IndexError")
dot.edge("LookupError", "KeyError")
dot.edge("Exception", "MemoryError")
dot.edge("Exception", "NameError")
dot.edge("NameError", "UnboundLocalError")
dot.edge("Exception", "OSError")
dot.edge("OSError", "BlockingIOError")
dot.edge("OSError", "ChildProcessError")
dot.edge("OSError", "ConnectionError")
dot.edge("ConnectionError", "BrokenPipeError")
dot.edge("ConnectionError", "ConnectionAbortedError")
dot.edge("ConnectionError", "ConnectionRefusedError")
dot.edge("ConnectionError", "ConnectionResetError")
dot.edge("OSError", "FileExistsError")
dot.edge("OSError", "FileNotFoundError")
dot.edge("OSError", "InterruptedError")
dot.edge("OSError", "IsADirectoryError")
dot.edge("OSError", "NotADirectoryError")
dot.edge("OSError", "PermissionError")
dot.edge("OSError", "ProcessLookupError")
dot.edge("OSError", "TimeoutError")
dot.edge("Exception", "ReferenceError")
dot.edge("Exception", "RuntimeError")
dot.edge("RuntimeError", "NotImplementedError")
dot.edge("RuntimeError", "RecursionError")
dot.edge("Exception", "SyntaxError")
dot.edge("SyntaxError", "IndentationError")
dot.edge("SyntaxError", "TabError")
dot.edge("Exception", "SystemError")
dot.edge("Exception", "TypeError")
dot.edge("Exception", "ValueError")
dot.edge("ValueError", "UnicodeError")
dot.edge("UnicodeError", "UnicodeDecodeError")
dot.edge("UnicodeError", "UnicodeEncodeError")
dot.edge("UnicodeError", "UnicodeTranslateError")
dot_source = dot.source
with open("exceptions.dot", "w") as dot_file:
dot_file.write(dot_source)
| [
0,
1,
2,
3,
4
] |
1,089 | 438efbaf35401a29ea5408fee3b49b85f237760e | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('home', '0010_auto_20170512_2248')]
operations = [migrations.AlterField(model_name='classroom', name=
'subject5teacher', field=models.ForeignKey(default=None, on_delete=
django.db.models.deletion.CASCADE, related_name='+', to=
'home.Teacher', verbose_name='Chemistry'))]
| from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('home', '0010_auto_20170512_2248')]
operations = [migrations.AlterField(model_name='classroom', name=
'subject5teacher', field=models.ForeignKey(default=None, on_delete=
django.db.models.deletion.CASCADE, related_name='+', to=
'home.Teacher', verbose_name='Chemistry'))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 20:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0010_auto_20170512_2248'),
]
operations = [
migrations.AlterField(
model_name='classroom',
name='subject5teacher',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),
),
]
| [
0,
1,
2,
3,
4
] |
1,090 | 22523304c9e2ce1339a7527cdbd67a81c780d806 | <mask token>
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split('\t')
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(
f"""WARNING: With a score of {maxscore} did not find any hits to {p}
"""
)
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split('\t')
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\n')
return vc
<mask token>
| <mask token>
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split('\t')
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(
f"""WARNING: With a score of {maxscore} did not find any hits to {p}
"""
)
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split('\t')
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\n')
return vc
def count_hits(bh, vc, verbose=False):
"""
Count the vc hits per genome
"""
hc = {}
for g in bh:
hc[g] = {}
for b in bh[g]:
hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1
besthit = None
bhc = 0
for h in hc[g]:
if hc[g][h] > bhc:
bhc = hc[g][h]
besthit = h
print(f'{g}\t{besthit}')
return hc
<mask token>
| <mask token>
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split('\t')
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(
f"""WARNING: With a score of {maxscore} did not find any hits to {p}
"""
)
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split('\t')
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\n')
return vc
def count_hits(bh, vc, verbose=False):
"""
Count the vc hits per genome
"""
hc = {}
for g in bh:
hc[g] = {}
for b in bh[g]:
hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1
besthit = None
bhc = 0
for h in hc[g]:
if hc[g][h] > bhc:
bhc = hc[g][h]
besthit = h
print(f'{g}\t{besthit}')
return hc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-d', help='mash distance file', required=True)
parser.add_argument('-c', help='distance cutoff score, default = 0',
default=0, type=float)
parser.add_argument('-m', help='metadata file', required=True)
parser.add_argument('-g', help='genome column, default = 0', default=0,
type=int)
parser.add_argument('-l', help='virus cluster col in the metadata file',
type=int, required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
bh = best_hits(args.d, args.c, args.v)
vc = find_vc(args.m, args.g, args.l, args.v)
count_hits(bh, vc, args.v)
| <mask token>
import os
import sys
import argparse
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split('\t')
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(
f"""WARNING: With a score of {maxscore} did not find any hits to {p}
"""
)
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split('\t')
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\n')
return vc
def count_hits(bh, vc, verbose=False):
"""
Count the vc hits per genome
"""
hc = {}
for g in bh:
hc[g] = {}
for b in bh[g]:
hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1
besthit = None
bhc = 0
for h in hc[g]:
if hc[g][h] > bhc:
bhc = hc[g][h]
besthit = h
print(f'{g}\t{besthit}')
return hc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-d', help='mash distance file', required=True)
parser.add_argument('-c', help='distance cutoff score, default = 0',
default=0, type=float)
parser.add_argument('-m', help='metadata file', required=True)
parser.add_argument('-g', help='genome column, default = 0', default=0,
type=int)
parser.add_argument('-l', help='virus cluster col in the metadata file',
type=int, required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
bh = best_hits(args.d, args.c, args.v)
vc = find_vc(args.m, args.g, args.l, args.v)
count_hits(bh, vc, args.v)
| """
We have created mash sketches of the GPDB database, the MGV database, and the SDSU phage, and
this will figure out the top hits and summarize their familes.
"""
import os
import sys
import argparse
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split("\t")
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(f"WARNING: With a score of {maxscore} did not find any hits to {p}\n")
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split("\t")
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f"Found {len(vc)} virus clusters in {mdf}\n")
return vc
def count_hits(bh, vc, verbose=False):
"""
Count the vc hits per genome
"""
hc = {}
for g in bh:
hc[g] = {}
for b in bh[g]:
hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1
besthit = None
bhc = 0
for h in hc[g]:
if hc[g][h] > bhc:
bhc = hc[g][h]
besthit = h
#print(f"{g}\t{besthit}\t{bhc}\t{len(bh[g])}")
print(f"{g}\t{besthit}")
return hc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-d', help='mash distance file', required=True)
parser.add_argument('-c', help='distance cutoff score, default = 0', default=0, type=float)
parser.add_argument('-m', help='metadata file', required=True)
parser.add_argument('-g', help='genome column, default = 0', default=0, type=int)
parser.add_argument('-l', help='virus cluster col in the metadata file', type=int, required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
bh = best_hits(args.d, args.c, args.v)
vc = find_vc(args.m, args.g, args.l, args.v)
count_hits(bh, vc,args.v)
| [
2,
3,
4,
5,
6
] |
1,091 | 669eb2e898c3a127ae01e0ee3020a3674e5e340d | from yoloPydarknet import pydarknetYOLO
import cv2
import imutils
import time
yolo = pydarknetYOLO(obdata="../darknet/cfg/coco.data", weights="yolov3.weights",
cfg="../darknet/cfg/yolov3.cfg")
video_out = "yolo_output.avi"
start_time = time.time()
if __name__ == "__main__":
VIDEO_IN = cv2.VideoCapture(0)
if(video_out!=""):
width = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_WIDTH)) # float
height = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(video_out,fourcc, 30.0, (int(width),int(height)))
frameID = 0
while True:
hasFrame, frame = VIDEO_IN.read()
# Stop the program if reached end of video
if not hasFrame:
print("Done processing !!!")
print("--- %s seconds ---" % (time.time() - start_time))
break
yolo.getObject(frame, labelWant="", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))
print ("Object counts:", yolo.objCounts)
cv2.imshow("Frame", imutils.resize(frame, width=850))
if(video_out!=""):
out.write(frame)
k = cv2.waitKey(1)
if k == 0xFF & ord("q"):
out.release()
break
| null | null | null | null | [
0
] |
1,092 | 6b55a9061bb118558e9077c77e18cfc81f3fa034 | <mask token>
| <mask token>
class Solution:
<mask token>
| <mask token>
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:
return len(nums) >= K * max(Counter(nums).values())
| from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:
return len(nums) >= K * max(Counter(nums).values())
| #
# @lc app=leetcode id=1121 lang=python3
#
# [1121] Divide Array Into Increasing Sequences
#
# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/
#
# algorithms
# Hard (53.30%)
# Likes: 32
# Dislikes: 11
# Total Accepted: 1.7K
# Total Submissions: 3.2K
# Testcase Example: '[1,2,2,3,3,4,4]\n3'
#
# Given a non-decreasing array of positive integers nums and an integer K, find
# out if this array can be divided into one or more disjoint increasing
# subsequences of length at least K.
#
#
#
# Example 1:
#
#
# Input: nums = [1,2,2,3,3,4,4], K = 3
# Output: true
# Explanation:
# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with
# lengths at least 3 each.
#
#
# Example 2:
#
#
# Input: nums = [5,6,6,7,8], K = 3
# Output: false
# Explanation:
# There is no way to divide the array using the conditions required.
#
#
#
#
# Note:
#
#
# 1 <= nums.length <= 10^5
# 1 <= K <= nums.length
# 1 <= nums[i] <= 10^5
#
#
#
# @lc code=start
from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:
return len(nums) >= K * max(Counter(nums).values())
# cur, groups = 1, 1
# for i in range(1, len(nums)):
# if nums[i] > nums[i - 1]:
# cur = 1
# else:
# cur += 1
# groups = max(groups, cur)
# return len(nums) >= K * groups
# @lc code=end
| [
0,
1,
2,
3,
4
] |
1,093 | d7524a455e62594e321b67f0a32a5c3a7437c1d6 | <mask token>
| <mask token>
def make_example():
addr = './example.xlsx'
work_book = Workbook()
work_sheet = work_book.active
work_sheet['A1'] = 'Hello World!'
select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')
work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])
work_sheet.append(['The quick brown fox', ' ', ' jumps over ',
'a lazy dog.'])
work_sheet.merge_cells('A3:B3')
work_sheet.merge_cells('A4:B4')
SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)
CENTER_ALIGN = Alignment(horizontal='center', vertical='center')
LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4
THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)
for row in work_sheet['A1:D4']:
for cell in row:
cell.font = SIMSUN_20_BOLD
cell.alignment = CENTER_ALIGN
cell.border = THIN_BORDER
work_sheet.row_dimensions[1].height = 15
work_sheet.row_dimensions[2].height = 20
for row_letter in range(3, 5, 1):
work_sheet.row_dimensions[row_letter].height = 17
for col_letter in ['A', 'B']:
work_sheet.column_dimensions[col_letter].width = 20
work_sheet.column_dimensions['C'].width = 17
work_sheet.column_dimensions['D'].width = 25
COLOR_MAP = ['ff9900', '000000']
COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=
COLOR_MAP[0])
BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
work_sheet['A1'].fill = BG_FILL
work_book.save(addr)
<mask token>
| <mask token>
def make_example():
addr = './example.xlsx'
work_book = Workbook()
work_sheet = work_book.active
work_sheet['A1'] = 'Hello World!'
select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')
work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])
work_sheet.append(['The quick brown fox', ' ', ' jumps over ',
'a lazy dog.'])
work_sheet.merge_cells('A3:B3')
work_sheet.merge_cells('A4:B4')
SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)
CENTER_ALIGN = Alignment(horizontal='center', vertical='center')
LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4
THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)
for row in work_sheet['A1:D4']:
for cell in row:
cell.font = SIMSUN_20_BOLD
cell.alignment = CENTER_ALIGN
cell.border = THIN_BORDER
work_sheet.row_dimensions[1].height = 15
work_sheet.row_dimensions[2].height = 20
for row_letter in range(3, 5, 1):
work_sheet.row_dimensions[row_letter].height = 17
for col_letter in ['A', 'B']:
work_sheet.column_dimensions[col_letter].width = 20
work_sheet.column_dimensions['C'].width = 17
work_sheet.column_dimensions['D'].width = 25
COLOR_MAP = ['ff9900', '000000']
COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=
COLOR_MAP[0])
BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
work_sheet['A1'].fill = BG_FILL
work_book.save(addr)
if __name__ == '__main__':
make_example()
| from openpyxl import Workbook
from openpyxl.styles import Font, Alignment, Border, Side, PatternFill, colors
def make_example():
addr = './example.xlsx'
work_book = Workbook()
work_sheet = work_book.active
work_sheet['A1'] = 'Hello World!'
select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')
work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])
work_sheet.append(['The quick brown fox', ' ', ' jumps over ',
'a lazy dog.'])
work_sheet.merge_cells('A3:B3')
work_sheet.merge_cells('A4:B4')
SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)
CENTER_ALIGN = Alignment(horizontal='center', vertical='center')
LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4
THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)
for row in work_sheet['A1:D4']:
for cell in row:
cell.font = SIMSUN_20_BOLD
cell.alignment = CENTER_ALIGN
cell.border = THIN_BORDER
work_sheet.row_dimensions[1].height = 15
work_sheet.row_dimensions[2].height = 20
for row_letter in range(3, 5, 1):
work_sheet.row_dimensions[row_letter].height = 17
for col_letter in ['A', 'B']:
work_sheet.column_dimensions[col_letter].width = 20
work_sheet.column_dimensions['C'].width = 17
work_sheet.column_dimensions['D'].width = 25
COLOR_MAP = ['ff9900', '000000']
COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=
COLOR_MAP[0])
BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
work_sheet['A1'].fill = BG_FILL
work_book.save(addr)
if __name__ == '__main__':
make_example()
| # 引入基础的工作表
from openpyxl import Workbook
# 引入增强的修改功能
from openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors
# import openpyxl
def make_example():
# 设定文件目录
addr = './example.xlsx'
# 初始化文件,切换到活动的工作表
work_book = Workbook()
# 读取文件采用
# work_book = openpyxl.load_workbook(addr)
work_sheet = work_book.active
# 直接对表格对象赋值
work_sheet['A1'] = 'Hello World!'
# 采用指定行列的方法赋值(第2行,第二列)
select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')
# 添加两行数据到表格
work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])
work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])
# 合并两个单元格作为示范
work_sheet.merge_cells('A3:B3')
work_sheet.merge_cells('A4:B4')
# 遍历表格,读取表格中的数据
# 初始化字体
SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)
# 初始化表格对齐模板
CENTER_ALIGN = Alignment(horizontal='center',vertical='center')
# 初始化表格边框样式
LE,RI,TO,BO = [Side(style='thin',color='000000')]*4
THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)
# 遍历表格,读取表格中的数据
for row in work_sheet['A1:D4']:
for cell in row:
# 把样式赋值给表格
cell.font = SIMSUN_20_BOLD
cell.alignment = CENTER_ALIGN
cell.border = THIN_BORDER
# print(cell.value)
# 设置行高
work_sheet.row_dimensions[1].height=15
work_sheet.row_dimensions[2].height=20
for row_letter in range(3,5,1):
work_sheet.row_dimensions[row_letter].height=17
# 设置列宽
for col_letter in ['A','B']:
work_sheet.column_dimensions[col_letter].width=20
work_sheet.column_dimensions['C'].width=17
work_sheet.column_dimensions['D'].width=25
# 设置颜色
COLOR_MAP = ['ff9900','000000']
COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])
BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
work_sheet['A1'].fill = BG_FILL
# 保存到设定的addr
work_book.save(addr)
if __name__ == "__main__":
make_example() | [
0,
1,
2,
3,
4
] |
1,094 | 9fc9d766915bcefde4f0ba5c24cb83e33fc66272 | <mask token>
| <mask token>
dbindexer.autodiscover()
<mask token>
| <mask token>
dbindexer.autodiscover()
urlpatterns = patterns('harvester.views', url('^$', 'home', name='home'),
url('^settings/', 'settings', name='settings'))
| from django.conf.urls import patterns, include, url
import dbindexer
dbindexer.autodiscover()
urlpatterns = patterns('harvester.views', url('^$', 'home', name='home'),
url('^settings/', 'settings', name='settings'))
| from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
import dbindexer
dbindexer.autodiscover() #This needs to happen before anything else, hence strange import ordering
urlpatterns = patterns('harvester.views',
url(r'^$', 'home', name='home'),
url(r'^settings/', 'settings', name='settings'),
# Examples:
# url(r'^$', 'harvester.views.home', name='home'),
# url(r'^harvester/', include('harvester.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| [
0,
1,
2,
3,
4
] |
1,095 | c88e2336432f93d95b4e2285aa532b673a4a410b | <mask token>
class RDt(RPackage):
<mask token>
<mask token>
version('0.23', sha256=
'360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')
version('0.20', sha256=
'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')
version('0.17', sha256=
'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')
version('0.13', sha256=
'79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')
version('0.8', sha256=
'90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')
version('0.7', sha256=
'1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')
version('0.6', sha256=
'2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')
version('0.4', sha256=
'3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')
version('0.3', sha256=
'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')
version('0.2', sha256=
'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')
version('0.1', sha256=
'129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')
depends_on('r-promises', type=('build', 'run'), when='@0.5:')
| <mask token>
class RDt(RPackage):
<mask token>
cran = 'DT'
version('0.23', sha256=
'360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')
version('0.20', sha256=
'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')
version('0.17', sha256=
'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')
version('0.13', sha256=
'79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')
version('0.8', sha256=
'90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')
version('0.7', sha256=
'1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')
version('0.6', sha256=
'2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')
version('0.4', sha256=
'3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')
version('0.3', sha256=
'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')
version('0.2', sha256=
'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')
version('0.1', sha256=
'129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')
depends_on('r-promises', type=('build', 'run'), when='@0.5:')
| <mask token>
class RDt(RPackage):
"""A Wrapper of the JavaScript Library 'DataTables'.
Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
cran = 'DT'
version('0.23', sha256=
'360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')
version('0.20', sha256=
'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')
version('0.17', sha256=
'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')
version('0.13', sha256=
'79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')
version('0.8', sha256=
'90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')
version('0.7', sha256=
'1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')
version('0.6', sha256=
'2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')
version('0.4', sha256=
'3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')
version('0.3', sha256=
'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')
version('0.2', sha256=
'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')
version('0.1', sha256=
'129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')
depends_on('r-promises', type=('build', 'run'), when='@0.5:')
| from spack.package import *
class RDt(RPackage):
"""A Wrapper of the JavaScript Library 'DataTables'.
Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
cran = 'DT'
version('0.23', sha256=
'360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')
version('0.20', sha256=
'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')
version('0.17', sha256=
'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')
version('0.13', sha256=
'79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')
version('0.8', sha256=
'90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')
version('0.7', sha256=
'1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')
version('0.6', sha256=
'2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')
version('0.4', sha256=
'3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')
version('0.3', sha256=
'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')
version('0.2', sha256=
'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')
version('0.1', sha256=
'129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')
depends_on('r-promises', type=('build', 'run'), when='@0.5:')
| # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RDt(RPackage):
"""A Wrapper of the JavaScript Library 'DataTables'.
Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
cran = "DT"
version("0.23", sha256="360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70")
version("0.20", sha256="c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f")
version("0.17", sha256="e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56")
version("0.13", sha256="79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5")
version("0.8", sha256="90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21")
version("0.7", sha256="1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c")
version("0.6", sha256="2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916")
version("0.4", sha256="3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19")
version("0.3", sha256="ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb")
version("0.2", sha256="a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd")
version("0.1", sha256="129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756")
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"), when="@0.8:")
depends_on("r-magrittr", type=("build", "run"))
depends_on("r-crosstalk", type=("build", "run"))
depends_on("r-jquerylib", type=("build", "run"), when="@0.19:")
depends_on("r-promises", type=("build", "run"), when="@0.5:")
| [
1,
2,
3,
4,
5
] |
1,096 | 6b597f1570c022d17e4476e2ab8817e724a166a7 | <mask token>
| class Solution:
<mask token>
| class Solution:
def evalRPN(self, tokens: List[str]) ->int:
def operation(op1, op2, op):
if op == '+':
return op1 + op2
if op == '-':
return op1 - op2
if op == '*':
return op1 * op2
if op == '/':
return int(op1 / op2)
stack = []
for char in tokens:
if char in ['+', '-', '*', '/']:
op2 = stack.pop()
op1 = stack.pop()
res = operation(op1, op2, char)
stack.append(int(res))
else:
stack.append(int(char))
return stack.pop()
| class Solution:
def evalRPN(self, tokens: List[str]) -> int:
def operation(op1,op2,op):
if op == "+":
return op1 + op2
if op == "-":
return op1 - op2
if op == "*":
return op1 * op2
if op == "/":
return int(op1/op2)
stack = []
for char in tokens:
if char in ["+", "-", "*", "/"]:
op2 = stack.pop()
op1 = stack.pop()
res = operation(op1,op2,char)
stack.append(int(res))
else:
stack.append(int(char))
return stack.pop() | null | [
0,
1,
2,
3
] |
1,097 | 09c3a10230e7d0b3b893ccf236c39fc2dc12b2c6 | <mask token>
| <mask token>
print(dic['name'])
| dic = {'name': 'Eric', 'age': '25'}
print(dic['name'])
| dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태
print(dic['name'])
| null | [
0,
1,
2,
3
] |
1,098 | ecdc8f5f76b92c3c9dcf2a12b3d9452166fcb706 | <mask token>
| <mask token>
TEST_NAME = 'read_only'
VM_NAME = '{0}_vm_%s'.format(TEST_NAME)
VM_COUNT = 2
DISK_NAMES = dict()
DISK_TIMEOUT = 600
SPARSE = True
DIRECT_LUNS = UNUSED_LUNS
DIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES
DIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS
| <mask token>
from rhevmtests.storage.config import *
TEST_NAME = 'read_only'
VM_NAME = '{0}_vm_%s'.format(TEST_NAME)
VM_COUNT = 2
DISK_NAMES = dict()
DISK_TIMEOUT = 600
SPARSE = True
DIRECT_LUNS = UNUSED_LUNS
DIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES
DIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS
| """
Config module for storage read only disks
"""
from rhevmtests.storage.config import * # flake8: noqa
TEST_NAME = "read_only"
VM_NAME = "{0}_vm_%s".format(TEST_NAME)
VM_COUNT = 2
DISK_NAMES = dict() # dictionary with storage type as key
DISK_TIMEOUT = 600
# allocation policies
SPARSE = True
DIRECT_LUNS = UNUSED_LUNS
DIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES
DIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS
| null | [
0,
1,
2,
3
] |
1,099 | a55024f0e5edec22125ce53ef54ee364be185cb8 | <mask token>
| <mask token>
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await hass_client_no_auth()
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {
CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun'})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event after validating a signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(
API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun'})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_event_without_an_api_key(http_client,
webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event if there is no api key."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
| <mask token>
API_KEY = 'abc123'
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await hass_client_no_auth()
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {
CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun'})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event after validating a signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(
API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun'})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_event_without_an_api_key(http_client,
webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event if there is no api key."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
| <mask token>
import hashlib
import hmac
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
API_KEY = 'abc123'
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await hass_client_no_auth()
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {
CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(hass, {'internal_url':
'http://example.local:8123'})
result = await hass.config_entries.flow.async_init('mailgun', context={
'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result[
'flow_id'], {})
assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result['result'].data['webhook_id']
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun'})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(http_client,
webhook_id_with_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event after validating a signature."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=
{'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(
API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun'})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
async def test_mailgun_webhook_event_without_an_api_key(http_client,
webhook_id_without_api_key, mailgun_events) ->None:
"""Test that webhook triggers an event if there is no api key."""
timestamp = '1529006854'
token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'
event_count = len(mailgun_events)
await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',
json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=
bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),
digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,
'token': token}})
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key
assert mailgun_events[-1].data['hello'] == 'mailgun'
| """Test the init file of Mailgun."""
import hashlib
import hmac
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
API_KEY = "abc123"
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await hass_client_no_auth()
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(
hass,
mailgun.DOMAIN,
{mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: "example.com"}},
)
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=b"random_api_key",
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event after validating a signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_with_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_event_without_an_api_key(
http_client, webhook_id_without_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event if there is no api key."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
| [
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.