hexsha stringlengths 40 40 | size int64 6 14.9M | ext stringclasses 1 value | lang stringclasses 1 value | max_stars_repo_path stringlengths 6 260 | max_stars_repo_name stringlengths 6 119 | max_stars_repo_head_hexsha stringlengths 40 41 | max_stars_repo_licenses sequence | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 6 260 | max_issues_repo_name stringlengths 6 119 | max_issues_repo_head_hexsha stringlengths 40 41 | max_issues_repo_licenses sequence | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 6 260 | max_forks_repo_name stringlengths 6 119 | max_forks_repo_head_hexsha stringlengths 40 41 | max_forks_repo_licenses sequence | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | avg_line_length float64 2 1.04M | max_line_length int64 2 11.2M | alphanum_fraction float64 0 1 | cells sequence | cell_types sequence | cell_type_groups sequence |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d0d2ce0156532ea47e132214d25e254f16cf736c | 14,641 | ipynb | Jupyter Notebook | src/preprocess/translator.ipynb | pcrete/dengue_dash | 8f70ce80cd32d01fbcba0351fc2c9d105b6ae240 | [
"MIT"
] | 1 | 2018-06-08T12:48:24.000Z | 2018-06-08T12:48:24.000Z | src/preprocess/translator.ipynb | pcrete/dengue-analytics | 8f70ce80cd32d01fbcba0351fc2c9d105b6ae240 | [
"MIT"
] | null | null | null | src/preprocess/translator.ipynb | pcrete/dengue-analytics | 8f70ce80cd32d01fbcba0351fc2c9d105b6ae240 | [
"MIT"
] | 1 | 2019-01-04T08:59:31.000Z | 2019-01-04T08:59:31.000Z | 31.151064 | 120 | 0.429137 | [
[
[
"import os\nimport json\nimport pandas as pd\nfrom tqdm import tqdm_notebook\n\ndf_larval = pd.read_csv(os.path.join('..', 'data', 'breeding-sites', 'larval-survey-en.csv'))\ndf_larval.head()",
"_____no_output_____"
]
],
[
[
"## Shapefile",
"_____no_output_____"
]
],
[
[
"with open(os.path.join('..', 'data','shapefiles','Nakhon-Si-Thammarat.geojson')) as f:\n data = json.load(f)",
"_____no_output_____"
],
[
"for i, feature in enumerate(data['features']):\n prop = feature['properties']\n\n province = prop['PV_TN']\n district = prop['AP_TN']\n subdist = prop['TB_TN']\n \n df_tmp = df_larval.loc[(df_larval['province'] == province) & \n (df_larval['district'] == district)]\n province_en, district_en = df_tmp[['province_en','district_en']].values[0]\n \n prop['PV_EN'] = province_en\n prop['AP_EN'] = district_en",
"_____no_output_____"
],
[
"data['features'][2]['properties']",
"_____no_output_____"
],
[
"with open(os.path.join('..', 'data', 'shapefiles', 'Nakhon-Si-Thammarat-en.geojson'), 'w') as FILE:\n json.dump(data, FILE, indent=4, ensure_ascii=False, sort_keys=True)",
"_____no_output_____"
]
],
[
[
"## Dictonary file",
"_____no_output_____"
]
],
[
[
"province_entry = []\nfor feature in data['features']:\n prop = feature['properties']\n \n province_entry.append([\n prop['PV_TN'],\n prop['AP_TN'],\n prop['TB_TN'],\n prop['PV_EN'],\n prop['AP_EN'],\n prop['TB_EN'],\n ])\n \nprovince_entry = pd.DataFrame.from_records(province_entry, columns=['province_th', 'district_th', 'subdist_th',\n 'province_en', 'district_en', 'subdist_en'])\n\nprovince_entry.to_csv(os.path.join('..', 'data', 'shapefiles', 'Nakhon-Si-Thammarat-dictionary.csv'))\nprovince_entry.head()",
"_____no_output_____"
]
],
[
[
"## Detection file",
"_____no_output_____"
]
],
[
[
"with open(os.path.join('..', 'data','breeding-sites','detection.geojson')) as f:\n detection = json.load(f)",
"_____no_output_____"
],
[
"for feature in tqdm_notebook(detection['features']):\n prop = feature['properties']\n \n province = prop['province']\n district = prop['district']\n subdist = prop['subdist']\n\n df_tmp = province_entry.loc[\n (province_entry['province_th'] == province) & \n (province_entry['district_th'] == district) &\n (province_entry['subdist_th'] == subdist)\n ]\n \n province_en, district_en, subdist_en = df_tmp[['province_en','district_en', 'subdist_en']].values[0]\n \n prop['province_en'] = province_en\n prop['district_en'] = district_en\n prop['subdist_en'] = subdist_en",
"_____no_output_____"
],
[
"with open(os.path.join('..', 'data','breeding-sites','detection-en.geojson'), 'w') as FILE:\n json.dump(detection, FILE, indent=4, ensure_ascii=False, sort_keys=True)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0d2d2317710e6e98e3e30bcddcc0677be33413d | 45,582 | ipynb | Jupyter Notebook | Exercises/Hands_on_Cypher/Hands_on_Cypher.ipynb | AGuyNextDoor/AProfNextDoor | 5dbd7eed0d2c355ecfeaa5514c96831ba1770fea | [
"BSD-3-Clause"
] | 2 | 2021-06-14T10:11:51.000Z | 2021-06-17T10:14:55.000Z | Exercises/Hands_on_Cypher/Hands_on_Cypher.ipynb | AGuyNextDoor/AProfNextDoor | 5dbd7eed0d2c355ecfeaa5514c96831ba1770fea | [
"BSD-3-Clause"
] | null | null | null | Exercises/Hands_on_Cypher/Hands_on_Cypher.ipynb | AGuyNextDoor/AProfNextDoor | 5dbd7eed0d2c355ecfeaa5514c96831ba1770fea | [
"BSD-3-Clause"
] | 1 | 2021-06-17T12:03:53.000Z | 2021-06-17T12:03:53.000Z | 28.050462 | 2,741 | 0.388158 | [
[
[
"# Les imports pour l'exercice\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport string\nimport random\nfrom collections import deque",
"_____no_output_____"
]
],
[
[
"## Partie 1 : Code de César",
"_____no_output_____"
],
[
"### Implementation ",
"_____no_output_____"
],
[
"Le code suivant contient deux fonctions principales : `encryptMessage` et `decryptMessage`.\n\nCes fonctions permet d'encoder et de decoder un string donnée. \n\nL'encryption se fait de facon classique par decalage d'alphabet. La clé donnée est le nombre de decalage.",
"_____no_output_____"
],
[
"La fonction `convertAlphabets` permet de convertir un texte entre deux alphabets.\n\nLa fonction `shiftAlphabet` permet de décaler les éléments d'une matrice en faisant une rotation. C'est l'équivalent de decaler la roue interne du code de César.",
"_____no_output_____"
]
],
[
[
"alphabet_string = string.printable\n\nalphabetListOrdered = list(alphabet_string)\n\nnumberListOrdered = list(map(ord, alphabetListOrdered))\n\ndef convertAlphabets(messageCharNumList, numList, numListToConvert, encrypt = True) :\n index = 0\n newList = []\n for val in messageCharNumList:\n indexOfLetter = numListToConvert.index(val)\n newList.append(numList[indexOfLetter])\n\n index += 1\n if encrypt :\n return ' '.join(map(chr,newList))\n else : \n return ''.join(map(chr,newList))\n\ndef shiftAlphabet(listToShift, keyShift):\n keyShift = keyShift % len(listToShift)\n return listToShift[keyShift:] + listToShift[:keyShift]",
"_____no_output_____"
]
],
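[
[
"A quick sanity check of `shiftAlphabet` (the values below are illustrative additions, not from the original notebook): rotating a 5-element list by 2 should move the first two elements to the end.",
"_____no_output_____"
]
],
[
[
"# Illustrative demo: shiftAlphabet rotates the list left by the key\n# Expected result: ['c', 'd', 'e', 'a', 'b']\nshiftAlphabet(['a', 'b', 'c', 'd', 'e'], 2)",
"_____no_output_____"
]
],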
[
[
"Pour la fonction d'encryption, on utilisera l'alphabet de tout les caracteres imprimable en ascii donnée par `string.printable`.",
"_____no_output_____"
]
],
[
[
"string.printable",
"_____no_output_____"
],
[
"def encryptMessage(m, shiftKey):\n\n alphabet_string = string.printable\n\n alphabetListOrdered = list(alphabet_string)\n\n numberListOrdered = list(map(ord, alphabetListOrdered))\n\n alphabetListShuffled = shiftAlphabet(list(alphabetListOrdered), shiftKey)\n\n numberListShuffled = list(map(ord, alphabetListShuffled))\n\n return convertAlphabets(list(map(ord, list(m))), numberListShuffled, numberListOrdered)",
"_____no_output_____"
],
[
"def decryptMessage(m, shiftKey):\n m = m.replace(' ', '')\n alphabet_string = string.printable\n\n alphabetListOrdered = list(alphabet_string)\n\n numberListOrdered = list(map(ord, alphabetListOrdered))\n\n alphabetListShuffled = list(alphabetListOrdered)\n\n alphabetListShuffled = shiftAlphabet(list(alphabetListOrdered), shiftKey)\n\n numberListShuffled = list(map(ord, alphabetListShuffled))\n\n return convertAlphabets(list(map(ord, list(m))), numberListOrdered, numberListShuffled, False)\n",
"_____no_output_____"
]
],
[
[
"En dessous est un exemple d'encryption et de decryption. ",
"_____no_output_____"
]
],
[
[
"m = \"Vous savez, moi je ne crois pas qu'il y ait de bonne ou de mauvaise situation. Moi, si je devais resumer ma vie aujourd'hui avec vous, je dirais que c'est d'abord des rencontres. Des gens qui m'ont tendu la main, peut-etre a un moment ou je ne pouvais pas, ou j'etais seul chez moi\"\nm",
"_____no_output_____"
]
],
[
[
"On encrypte le texte plus haut au moins d'une clé de valeur `4051`",
"_____no_output_____"
]
],
[
[
"e = encryptMessage(m, 4501)\ne",
"[49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 58, 59, 60, 61, 62, 63, 64, 91, 92, 93, 94, 95, 96, 123, 124, 125, 126, 32, 9, 10, 13, 11, 12, 48]\n"
]
],
[
[
"On verifie que la decryption marche bien quand on remet la clé.",
"_____no_output_____"
]
],
[
[
"d = decryptMessage(e, 4501)\nd",
"_____no_output_____"
]
],
[
[
"### Exercice : Cracker un cypher par décalage avec l'analyse de fréquence",
"_____no_output_____"
],
[
"Voici un cypher généré avec l'algorithme d'encryption de cypher encryptMessage.\nLe but est d'arriver a decrypter le message grace a l'analyse de frequence des caracteres du message. \n\nLe message code est en anglais, donc un morceau de texte anglais vous est donne pour que vous puissez comparer les frequences.",
"_____no_output_____"
]
],
[
[
"# Ceci est le message codé à cracker.\n# Des espaces sont laissés expres pour permettre de bien reconnaitre les caracteres\n\n# Attention aux caracteres speciaux d'ascii. E.g. `\\r` est bien compté comme 1 caractere de lalphabet.\n\ncypher = \"Z C D M P C i W M S i B G Q A S Q Q i R F C i P C Q M J S R G M L \\r i J C R i K C i N J y A C i z C D M P C i W M S i M L C i M P i R U M i R F G L E Q \\r i ' i U y L R i W M S i R M i S L B C P Q R y L B i R U M i R F G L E Q i T C P W i A J C y P J W i y L B i R M i A M L Q G B C P i R F C K i D P M K i R F C i Q y K C i N M G L R i M D i T G C U i D P M K i U F G A F i ' i y K i N J y A G L E i R F C K i z C D M P C i W M S \\x0c i ' i y Q I i W M S i R M i A M L Q G B C P i G R i D P M K i K W i N M G L R i M D i T G C U \\r i z C A y S Q C i G D i W M S i y N N P M T C i M D i G R \\r i W M S i U G J J i z C i C L H M G L C B i R M i A y P P W i M S R i y J J i ' i Q y W \\x0c i ' R i U G J J i z C i y i E P C y R i P C Q N M L Q G z G J G R W \\x0c i < F C P C i y P C i N C M N J C i U F M i y Q I i K C i U F C R F C P i ' i y K i R F C i Q y K C i K y L i R F y R i ' i U y Q i G L i p x q o \\r i M P i U F C R F C P i R F C P C i F y Q i z C C L i y L W i A F y L E C i G L i K C \\x0c i [ M S i y P C i P G E F R i G L i y Q I G L E i R F y R i O S C Q R G M L \\x0c i * C R i K C \\r i F M U C T C P \\r i F y Q R C L i R M i y Q Q S P C i R F y R i ' i y K i R F C i Q y K C i % y L B F G i y Q i ' i U y Q i G L i p x q o \\x0c i ' i F y T C i L M R i A F y L E C B i G L i y L W i D S L B y K C L R y J i P C Q N C A R \\x0c i ' i y R R y A F i R F C i Q y K C i G K N M P R y L A C i R M i L M L T G M J C L A C i R F y R i ' i B G B i R F C L \\x0c i ' D i y R i y J J \\r i K W i C K N F y Q G Q i M L i G R i F y Q i E P M U L i Q R P M L E C P \\x0c i < F C P C i G Q i L M i P C y J i A M L R P y B G A R G M L i z C R U C C L i R F C i N P C Q C L R i P C Q M J S R G M L i y L B i K W i N P C T G M S Q i U P G R G L E Q i y L B i S R R C P y L A C Q \\x0c i - A A y Q G M L Q i J G I C i R F C i N P C Q C L R i B M i L M R i M A A S P i G L i C T C P W z M B W } Q i y L B i z S R i P y P C J W i G L i y L W z M B W } Q i J G D C \\x0c i ' i U y L R i W M S i R M i I L M U i y L B i D C C J i R F y R i R F C P C i G Q i L M R F G L E i z S R i N S P C Q R i Y F G K Q y i G L i y J J i R F y R i ' i y K i Q y W G L E i y L B i B M G L E i R M B y W \\x0c i < F C i B P y D R i P C Q M J S R G M L i M D i R F C i ? M P I G L E i ! M K K G R R C C i G Q i z y Q C B i M L i Y F G K Q y \\r i R F C i A M L R C K N J y R C B i Q R P S E E J C i Q G K G J y P J W i F y Q i G R Q i P M M R Q i G L i Y F G K Q y \\x0c i ' D \\r i R F C P C D M P C \\r i R F C P C i G Q i y L W i y K M L E i W M S i U F M i F y Q i J M Q R i D y G R F i G L i Y F G K Q y i M P i G Q i U C y P G C B i M D i G R \\r i J C R i F G K i L M R i T M R C i D M P i R F G Q i P C Q M J S R G M L \\x0c`\"\n",
"_____no_output_____"
],
[
"# Le code ici permet de transformer le cypher en tableau, enlevant au passage tout les espaces entre chaque caracteres\n\ncypherList = list(cypher)[0::2]\ncypherList\n",
"_____no_output_____"
]
],
[
[
"Pour vous aider, voici un morceau de discours en anglais sur lequel vous pouvez faire une premiere analyse de frequences de caracteres.\n\nLe texte est assez long pour etre representatif d'une distribution classique de la langue anglaise.\nVotre histogramme devrait ressembler a celui sur la page web suivante : \nhttps://www3.nd.edu/~busiforc/handouts/cryptography/letterfrequencies.html\n\n",
"_____no_output_____"
]
],
[
[
"englishText = \"I am the First Accused.I hold a Bachelor's Degree in Arts and practised as an attorney in Johannesburg for a number of years in partnership with Oliver Tambo. I am a convicted prisoner serving five years for leaving the country without a permit and for inciting people to go on strike at the end of May 1961. At the outset, I want to say that the suggestion made by the State in its opening that the struggle in South Africa is under the influence of foreigners or communists is wholly incorrect. I have done whatever I did, both as an individual and as a leader of my people, because of my experience in South Africa and my own proudly felt African background, and not because of what any outsider might have said. In my youth in the Transkei I listened to the elders of my tribe telling stories of the old days. Amongst the tales they related to me were those of wars fought by our ancestors in defence of the fatherland. The names of Dingane and Bambata, Hintsa and Makana, Squngthi and Dalasile, Moshoeshoe and Sekhukhuni, were praised as the glory of the entire African nation. I hoped then that life might offer me the opportunity to serve my people and make my own humble contribution to their freedom struggle. This is what has motivated me in all that I have done in relation to the charges made against me in this case. Having said this, I must deal immediately and at some length with the question of violence. Some of the things so far told to the Court are true and some are untrue. I do not, however, deny that I planned sabotage. I did not plan it in a spirit of recklessness, nor because I have any love of violence. I planned it as a result of a calm and sober assessment of the political situation that had arisen after many years of tyranny, exploitation, and oppression of my people by the Whites.\"",
"_____no_output_____"
]
],
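[
[
"Below is a minimal plotting sketch (an illustrative addition; `plot_frequencies` is a hypothetical helper and `Counter` is an extra import): it counts each `string.printable` character in a text and draws the frequency bar chart you will want to compare.",
"_____no_output_____"
]
],
[
[
"# Sketch: plot the character-frequency distribution of a text over string.printable\nfrom collections import Counter\n\ndef plot_frequencies(text, title):\n    counts = Counter(text)\n    alphabet = list(string.printable)\n    freqs = [counts.get(ch, 0) for ch in alphabet]\n    plt.figure(figsize=(15, 4))\n    plt.bar(range(len(alphabet)), freqs)\n    plt.title(title)\n    plt.show()\n\nplot_frequencies(englishText, 'Character frequencies in the English sample')",
"_____no_output_____"
]
],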
[
[
"**Consigne** :\nComparer la frequence d'apparition des caracteres du cypher et du discours.\nPar simplicité, triez les tableaux dans l'ordre alphabetique.\n\nIl faut ensuite que vous decaliez l'alphabet du cypher jusqu'a que les deux distributions se superposent.\nVous pouvez utiliser la fonction `shiftAlphabet` donnée plus haut.\nCe décalage sera donc la clé d'encryption et de decryption!",
"_____no_output_____"
]
],
[
[
"# Zone a coder en dessous",
"_____no_output_____"
]
],
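[
[
"One possible starting point (a sketch, not necessarily the intended solution; `frequency_vector` is a hypothetical helper introduced here): score every candidate shift by how closely the decrypted text's character frequencies match those of the English sample, and keep the best-scoring key.",
"_____no_output_____"
]
],
[
[
"# Sketch: brute-force the shift key, scoring each candidate by frequency distance\nfrom collections import Counter\n\ndef frequency_vector(text):\n    counts = Counter(text)\n    total = sum(counts.values())\n    return [counts.get(ch, 0) / total for ch in string.printable]\n\ntarget = frequency_vector(englishText)\ncypher_text = ''.join(cypherList)\n\nbest_key, best_score = None, float('inf')\nfor key in range(len(string.printable)):\n    candidate = decryptMessage(cypher_text, key)\n    score = sum((a - b) ** 2 for a, b in zip(frequency_vector(candidate), target))\n    if score < best_score:\n        best_key, best_score = key, score\n\nprint(best_key)\nprint(decryptMessage(cypher_text, best_key)[:200])",
"_____no_output_____"
]
],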
[
[
"## Partie 2 : Code de César Aléatoire",
"_____no_output_____"
],
[
"## Implementation",
"_____no_output_____"
],
[
"Voici une legere modification des fonctions d'encryptions du debut de l'exercice.\n\nLa seule difference etant qu'au lieu de mélanger par décalage, la position de chaque lettre est aléatoire",
"_____no_output_____"
]
],
[
[
"def convertAlphabetsRand(messageCharNumList, numList, numListToConvert, encrypt = True) :\n index = 0\n newList = []\n for val in messageCharNumList:\n indexOfLetter = numListToConvert.index(val)\n newList.append(numList[indexOfLetter])\n\n index += 1\n if encrypt :\n return ' '.join(map(chr,newList))\n else : \n return ''.join(map(chr,newList))\n\ndef encryptMessageRand(m, seedKey):\n\n alphabet_string = string.printable\n\n alphabetListOrdered = list(alphabet_string)\n\n numberListOrdered = list(map(ord, alphabetListOrdered))\n\n alphabetListShuffled = list(alphabetListOrdered)\n\n def seed():\n return seedKey\n\n random.shuffle(alphabetListShuffled, seed)\n\n numberListShuffled = list(map(ord, alphabetListShuffled))\n\n return convertAlphabetsRand(list(map(ord, list(m))), numberListShuffled, numberListOrdered)\n",
"_____no_output_____"
],
[
"def decryptMessageRand(m, seedKey):\n m = m.replace(' ', '')\n alphabet_string = string.printable\n\n alphabetListOrdered = list(alphabet_string)\n\n numberListOrdered = list(map(ord, alphabetListOrdered))\n\n alphabetListShuffled = list(alphabetListOrdered)\n\n def seed():\n return seedKey\n\n random.shuffle(alphabetListShuffled, seed)\n\n numberListShuffled = list(map(ord, alphabetListShuffled))\n\n return convertAlphabets(list(map(ord, list(m))), numberListOrdered, numberListShuffled, False)\n\n\n",
"_____no_output_____"
],
[
"m = \"Vous savez, moi je ne crois pas qu'il y ait de bonne ou de mauvaise situation. Moi, si je devais resumer ma vie aujourd'hui avec vous, je dirais que c'est d'abord des rencontres. Des gens qui m'ont tendu la main, peut-etre a un moment ou je ne pouvais pas, ou j'etais seul chez moi\"\nm",
"_____no_output_____"
],
[
"e = encryptMessageRand(m, 0.42)\ne",
"<ipython-input-16-6abd02a37210>:27: DeprecationWarning: The *random* parameter to shuffle() has been deprecated\nsince Python 3.9 and will be removed in a subsequent version.\n random.shuffle(alphabetListShuffled, seed)\n"
],
[
"d = decryptMessageRand(e, 0.42)\nd",
"<ipython-input-17-6c852534887f>:14: DeprecationWarning: The *random* parameter to shuffle() has been deprecated\nsince Python 3.9 and will be removed in a subsequent version.\n random.shuffle(alphabetListShuffled, seed)\n"
]
],
[
[
"## Cracking a Random Cypher",
"_____no_output_____"
],
[
"Voici un cypher généré avec l'algorithme d'encryption de cypher encryptMessageRand.\nLe but est d'arriver a decrypter le message grace a l'analyse de frequence des caracteres du message. \n\nLe message code est en anglais, donc un morceau de texte anglais vous est donne pour que vous puissez comparer les frequences.",
"_____no_output_____"
]
],
[
[
"random_cypher = 'J \\t _ n \\t i _ q q z \\t u p \\t k p j 2 \\t x j u i \\t z p v \\t u p e _ z \\t j 2 \\t x i _ u \\t x j m m \\t h p \\t e p x 2 \\t j 2 \\t i j t u p s z \\t _ t \\t u i f \\t h s f _ u f t u \\t e f n p 2 t u s _ u j p 2 \\t 1 p s \\t 1 s f f e p n \\t j 2 \\t u i f \\t i j t u p s z \\t p 1 \\t p v s \\t 2 _ u j p 2 / \\t G j 3 f \\t t d p s f \\t z f _ s t \\t _ h p - \\t _ \\t h s f _ u \\t B n f s j d _ 2 - \\t j 2 \\t x i p t f \\t t z n \\n p m j d \\t t i _ e p x \\t x f \\t t u _ 2 e \\t u p e _ z - \\t t j h 2 f e \\t u i f \\t F n _ 2 d j q _ u j p 2 \\t Q s p d m _ n _ u j p 2 / \\t 6 i j t \\t n p n f 2 u p v t \\t e f d s f f \\t d _ n f \\t _ t \\t _ \\t h s f _ u \\t \\n f _ d p 2 \\t m j h i u \\t p 1 \\t i p q f \\t u p \\t n j m m j p 2 t \\t p 1 \\t O f h s p \\t t m _ 3 f t \\t x i p \\t i _ e \\t \\n f f 2 \\t t f _ s f e \\t j 2 \\t u i f \\t 1 m _ n f t \\t p 1 \\t x j u i f s j 2 h \\t j 2 k v t u j d f / \\t J u \\t d _ n f \\t _ t \\t _ \\t k p z p v t \\t e _ z \\n s f _ l \\t u p \\t f 2 e \\t u i f \\t m p 2 h \\t 2 j h i u \\t p 1 \\t u i f j s \\t d _ q u j 3 j u z / \\t C v u \\t p 2 f \\t i v 2 e s f e \\t z f _ s t \\t m _ u f s - \\t u i f \\t O f h s p \\t t u j m m \\t j t \\t 2 p u \\t 1 s f f / \\t P 2 f \\t i v 2 e s f e \\t z f _ s t \\t m _ u f s - \\t u i f \\t m j 1 f \\t p 1 \\t u i f \\t O f h s p \\t j t \\t t u j m m \\t t _ e m z \\t d s j q q m f e \\t \\n z \\t u i f \\t n _ 2 _ d m f t \\t p 1 \\t t f h s f h _ u j p 2 \\t _ 2 e \\t u i f \\t d i _ j 2 t \\t p 1 \\t e j t d s j n j 2 _ u j p 2 / \\t P 2 f \\t i v 2 e s f e \\t z f _ s t \\t m _ u f s - \\t u i f \\t O f h s p \\t m j 3 f t \\t p 2 \\t _ \\t m p 2 f m z \\t j t m _ 2 e \\t p 1 \\t q p 3 f s u z \\t j 2 \\t u i f \\t n j e t u \\t p 1 \\t _ \\t 3 _ t u \\t p d f _ 2 \\t p 1 \\t n _ u f s j _ m \\t q s p t q f s j u z / \\t P 2 f \\t i v 2 e s f e \\t z f _ s t \\t m _ u f s - \\t u i f \\t O f h s p \\t j t \\t t u j m m \\t m _ 2 h v j t i f e \\t j 2 \\t u i f \\t d p s 2 f s t \\t p 1 \\t B n f s j d _ 2 \\t t p d j f u z \\t _ 2 e \\t 1 j 2 e t \\t i j n t f m 1 \\t _ 2 \\t f y j m f \\t j 2 \\t i j t \\t p x 2 \\t m _ 2 e / \\t B 2 e \\t t p \\t x f ( 3 f \\t d p n f \\t i f s f \\t u p e _ z \\t u p \\t e s _ n _ u j A f \\t _ \\t t i _ n f 1 v m \\t d p 2 e j u j p 2 /'\n",
"_____no_output_____"
],
[
"random_cypherList = list(random_cypher)[0::2]\nrandom_cypherList",
"_____no_output_____"
],
[
"englishText = \"I am the First Accused.I hold a Bachelor's Degree in Arts and practised as an attorney in Johannesburg for a number of years in partnership with Oliver Tambo. I am a convicted prisoner serving five years for leaving the country without a permit and for inciting people to go on strike at the end of May 1961. At the outset, I want to say that the suggestion made by the State in its opening that the struggle in South Africa is under the influence of foreigners or communists is wholly incorrect. I have done whatever I did, both as an individual and as a leader of my people, because of my experience in South Africa and my own proudly felt African background, and not because of what any outsider might have said. In my youth in the Transkei I listened to the elders of my tribe telling stories of the old days. Amongst the tales they related to me were those of wars fought by our ancestors in defence of the fatherland. The names of Dingane and Bambata, Hintsa and Makana, Squngthi and Dalasile, Moshoeshoe and Sekhukhuni, were praised as the glory of the entire African nation. I hoped then that life might offer me the opportunity to serve my people and make my own humble contribution to their freedom struggle. This is what has motivated me in all that I have done in relation to the charges made against me in this case. Having said this, I must deal immediately and at some length with the question of violence. Some of the things so far told to the Court are true and some are untrue. I do not, however, deny that I planned sabotage. I did not plan it in a spirit of recklessness, nor because I have any love of violence. I planned it as a result of a calm and sober assessment of the political situation that had arisen after many years of tyranny, exploitation, and oppression of my people by the Whites.\"",
"_____no_output_____"
]
]
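,
[
[
"A possible starting sketch for the random cypher (an illustrative addition; a greedy rank mapping is only a rough first pass, and ties between characters of similar frequency will need manual correction): rank the characters of the cypher and of the English sample by frequency, map rank to rank, and inspect the result.",
"_____no_output_____"
]
],
[
[
"# Sketch: greedy frequency-rank mapping between the random cypher and the English sample\nfrom collections import Counter\n\ncypher_ranked = [ch for ch, _ in Counter(random_cypherList).most_common()]\nenglish_ranked = [ch for ch, _ in Counter(englishText).most_common()]\n\nmapping = dict(zip(cypher_ranked, english_ranked))\nrough_guess = ''.join(mapping.get(ch, '?') for ch in random_cypherList)\nprint(rough_guess[:300])",
"_____no_output_____"
]
]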
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0d2d2d3a397820940af333ddf38aa12293afef2 | 10,732 | ipynb | Jupyter Notebook | hashtag (2).ipynb | aparnasankarasetti/project_2 | 56e2c7c5c499435c2c7bb2307686d7970cd9c932 | [
"MIT"
] | null | null | null | hashtag (2).ipynb | aparnasankarasetti/project_2 | 56e2c7c5c499435c2c7bb2307686d7970cd9c932 | [
"MIT"
] | null | null | null | hashtag (2).ipynb | aparnasankarasetti/project_2 | 56e2c7c5c499435c2c7bb2307686d7970cd9c932 | [
"MIT"
] | null | null | null | 29.89415 | 172 | 0.279165 | [
[
[
"import pyspark\nfrom pyspark import SparkContext\nsc = SparkContext.getOrCreate();\nimport findspark\nfindspark.init()\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.master(\"local[*]\").getOrCreate()\nspark.conf.set(\"spark.sql.repl.eagerEval.enabled\", True) # Property used to format output tables better\nspark",
"_____no_output_____"
],
[
"from __future__ import print_function\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext",
"_____no_output_____"
],
[
"sc.stop()",
"_____no_output_____"
],
[
"sc=SparkContext(appName=\"Twitter Data Analysis\")\nsc.setLogLevel(\"ERROR\")\nssc=StreamingContext(sc,10)",
"_____no_output_____"
],
[
"socket_stream = ssc.socketTextStream(\"127.0.0.1\",9999)",
"_____no_output_____"
],
[
"lines=socket_stream.window(60)",
"_____no_output_____"
],
[
"hashtags = lines.flatMap(lambda text:text.split(\" \")).filter(lambda word:word.lower().startswith('#')).map(lambda word:(word.lower(),1)).reduceByKey(lambda a,b:a+b)",
"_____no_output_____"
],
[
"author_counts_sorted_dstream=hashtags.transform(lambda foo:foo.sortBy(lambda x:x[0].lower()).sortBy(lambda x:x[1],ascending=False))",
"_____no_output_____"
],
[
"author_counts_sorted_dstream.pprint()",
"_____no_output_____"
],
[
"ssc.start()",
"_____no_output_____"
],
[
"ssc.awaitTermination()",
"-------------------------------------------\nTime: 2021-08-20 13:04:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:04:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:04:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:04:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:05:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:06:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:07:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:08:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:09:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 
13:09:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:09:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:09:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:09:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:09:50\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:00\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:10\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:20\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:30\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:40\n-------------------------------------------\n\n-------------------------------------------\nTime: 2021-08-20 13:10:50\n-------------------------------------------\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d2e214e445a61f7c0768c4dbc030ed49454223 | 233,997 | ipynb | Jupyter Notebook | NG_LDA.ipynb | shivamras304/LyricsMatch_DataX1 | a6288f984b504ee7424ed80bb4621cfe968b605c | [
"MIT"
] | 4 | 2019-11-02T12:17:11.000Z | 2020-01-13T02:40:43.000Z | NG_LDA.ipynb | shivamras304/LyricsMatch_DataX1 | a6288f984b504ee7424ed80bb4621cfe968b605c | [
"MIT"
] | null | null | null | NG_LDA.ipynb | shivamras304/LyricsMatch_DataX1 | a6288f984b504ee7424ed80bb4621cfe968b605c | [
"MIT"
] | 3 | 2019-10-23T19:00:16.000Z | 2020-03-04T19:00:19.000Z | 148.099367 | 153,977 | 0.691214 | [
[
[
"# Run in python console\nimport nltk; nltk.download('stopwords')",
"[nltk_data] Downloading package stopwords to /Users/neha/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
]
],
[
[
" Import Packages",
"_____no_output_____"
]
],
[
[
"import re\nimport numpy as np\nimport pandas as pd\nfrom pprint import pprint\n\n# Gensim\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\n\n# spacy for lemmatization\nimport spacy\n\n# Plotting tools\nimport pyLDAvis\nimport pyLDAvis.gensim # don't skip this\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Enable logging for gensim - optional\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)\n\nimport warnings\nwarnings.filterwarnings(\"ignore\",category=DeprecationWarning)",
"_____no_output_____"
],
[
"# NLTK Stop words\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\nstop_words.extend(['from', 'subject', 're', 'edu', 'use'])",
"_____no_output_____"
]
],
[
[
"Importing Lyrics data",
"_____no_output_____"
]
],
[
[
"# Import Dataset\ndf = pd.read_csv('')\ndf1.head()",
"_____no_output_____"
],
[
"# df = df1.head(10)",
"_____no_output_____"
],
[
"print(df.genre.unique())",
"['Rock' 'Jazz' 'Pop' 'Country' 'Not Available' 'Metal']\n"
],
[
"print(df.artist.unique())",
"['discharge' 'fats-waller' 'backstreet-boys' 'don-williams' 'gnash'\n 'accept' 'ali-tatyana' 'bryan-ferry-roxy-music' 'from-autumn-to-ashes'\n 'everly-brothers']\n"
],
[
"print(df.year.unique())",
"[2007 2008 2006 2016 2004]\n"
]
],
[
[
" Remove newline characters",
"_____no_output_____"
]
],
[
[
"# Convert to list\n# data = df.lyrics.values.tolist()\n\n# data = [re.sub('[^a-zA-Z ]' ,'', str(sent)) for sent in data]\n\n# pprint(data[:1])",
"['Mother Dear MotherIts Cold TonightLike Every OtherEvery Other NightBut I '\n 'Never FeelFeel It AnywayIm Gonna Need SomethingSomething SoonI Can Feel '\n 'ItFeel It BeginBut I Dont KnowHow Im Gonna PayIt Must Be AboutMid December '\n 'Right NowAnd I Think Im AboutSeventeenIm Not Real SureHow Old I FeelI Lost '\n 'My ThoughtsIn Some DreamOh Mother I DontKnow HowI Got Where I AmBut Ill Try '\n 'To Explain AnyhowSee GraduallyYou Get Sucked InInto ItWithout Ever '\n 'ReallyRealisingJust Whats HappeningAnd That Is WhenThe Downward Spiral '\n 'BeginsAnyway Back To How It All StartedIt Started With DopeWhy Not After All '\n 'It Was Just The Once I Told MyselfI Didnt Even Like It Very MuchBut The '\n 'People I Was With All Did ItThen I Tried SpeedWhy Not After All It Was Just '\n 'The Once I Told MyselfThe Next Thing I Knew I Was Doing A Couple Of Grams A '\n 'WeekThen A Friend Introduced Me To Smack Chasing The DragonWhy Not After All '\n 'It Was Just The Once I Told MyselfWhy Not After All It Wasnt Using A '\n 'NeedleThen I Started Doing What I Said Id Never Do']\n"
],
[
"# def sent_to_words(sentences):\n# for sentence in sentences:\n# yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations\n\n# data_words = list(sent_to_words(data))\n\n# print(data_words[:1])",
"[['mother', 'dear', 'motherits', 'cold', 'tonightlike', 'every', 'otherevery', 'other', 'nightbut', 'never', 'feelfeel', 'it', 'anywayim', 'gonna', 'need', 'sooni', 'can', 'feel', 'itfeel', 'it', 'beginbut', 'dont', 'knowhow', 'im', 'gonna', 'payit', 'must', 'be', 'aboutmid', 'december', 'right', 'nowand', 'think', 'im', 'not', 'real', 'surehow', 'old', 'feeli', 'lost', 'my', 'thoughtsin', 'some', 'dreamoh', 'mother', 'dontknow', 'howi', 'got', 'where', 'ambut', 'ill', 'try', 'to', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'sucked', 'ininto', 'itwithout', 'ever', 'whats', 'happeningand', 'that', 'is', 'whenthe', 'downward', 'spiral', 'beginsanyway', 'back', 'to', 'how', 'it', 'all', 'startedit', 'started', 'with', 'dopewhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfi', 'didnt', 'even', 'like', 'it', 'very', 'muchbut', 'the', 'people', 'was', 'with', 'all', 'did', 'itthen', 'tried', 'speedwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfthe', 'next', 'thing', 'knew', 'was', 'doing', 'couple', 'of', 'grams', 'weekthen', 'friend', 'introduced', 'me', 'to', 'smack', 'chasing', 'the', 'dragonwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfwhy', 'not', 'after', 'all', 'it', 'wasnt', 'using', 'needlethen', 'started', 'doing', 'what', 'said', 'id', 'never', 'do']]\n"
]
],
[
[
"### Creating Bigram and Trigram Models\nBigrams are two words frequently occurring together in the document. Trigrams are 3 words frequently occurring.\n\nSome examples in our example are: ‘front_bumper’, ‘oil_leak’, ‘maryland_college_park’ etc.\n\nGensim’s Phrases model can build and implement the bigrams, trigrams, quadgrams and more. The two important arguments to Phrases are min_count and threshold. The higher the values of these param, the harder it is for words to be combined to bigrams.",
"_____no_output_____"
]
],
[
[
"# Build the bigram and trigram models\nbigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.\ntrigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n# Faster way to get a sentence clubbed as a trigram/bigram\nbigram_mod = gensim.models.phrases.Phraser(bigram)\ntrigram_mod = gensim.models.phrases.Phraser(trigram)\n\n# See trigram example\nprint(bigram_mod[data_words[0]])",
"['mother', 'dear', 'motherits', 'cold', 'tonightlike', 'every', 'otherevery', 'other', 'nightbut', 'never', 'feelfeel', 'it', 'anywayim', 'gonna', 'need', 'sooni', 'can', 'feel', 'itfeel', 'it', 'beginbut', 'dont', 'knowhow', 'im', 'gonna', 'payit', 'must', 'be', 'aboutmid', 'december', 'right', 'nowand', 'think', 'im', 'not', 'real', 'surehow', 'old', 'feeli', 'lost', 'my', 'thoughtsin', 'some', 'dreamoh', 'mother', 'dontknow', 'howi', 'got', 'where', 'ambut', 'ill', 'try', 'to', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'sucked', 'ininto', 'itwithout', 'ever', 'whats', 'happeningand', 'that', 'is', 'whenthe', 'downward', 'spiral', 'beginsanyway', 'back', 'to', 'how', 'it', 'all', 'startedit', 'started', 'with', 'dopewhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfi', 'didnt', 'even', 'like', 'it', 'very', 'muchbut', 'the', 'people', 'was', 'with', 'all', 'did', 'itthen', 'tried', 'speedwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfthe', 'next', 'thing', 'knew', 'was', 'doing', 'couple', 'of', 'grams', 'weekthen', 'friend', 'introduced', 'me', 'to', 'smack', 'chasing', 'the', 'dragonwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfwhy', 'not', 'after', 'all', 'it', 'wasnt', 'using', 'needlethen', 'started', 'doing', 'what', 'said', 'id', 'never', 'do']\n"
]
],
[
[
"### Remove Stopwords, Make Bigrams and Lemmatize\nThe bigrams model is ready. Let’s define the functions to remove the stopwords, make bigrams and lemmatization and call them sequentially.",
"_____no_output_____"
]
],
[
[
"# Define functions for stopwords, bigrams, trigrams and lemmatization\ndef remove_stopwords(texts):\n return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n\ndef make_bigrams(texts):\n return [bigram_mod[doc] for doc in texts]\n\ndef make_trigrams(texts):\n return [trigram_mod[bigram_mod[doc]] for doc in texts]\n\ndef lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n \"\"\"https://spacy.io/api/annotation\"\"\"\n texts_out = []\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n return texts_out",
"_____no_output_____"
]
],
[
[
"Let’s call the functions in order.",
"_____no_output_____"
]
],
[
[
"# Remove Stop Words\ndata_words_nostops = remove_stopwords(data_words)\n\n# Form Bigrams\ndata_words_bigrams = make_bigrams(data_words_nostops)\n\n# Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n# python3 -m spacy download en\nnlp = spacy.load('en', disable=['parser', 'ner'])\n\n# Do lemmatization keeping only noun, adj, vb, adv\ndata_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\nprint(data_lemmatized[:1])",
"[['mother', 'dear', 'motherit', 'cold', 'tonightlike', 'otherevery', 'nightbut', 'never', 'feelfeel', 'anywayim', 'go', 'need', 'sooni', 'feel', 'itfeel', 'beginbut', 'do', 'not', 'knowhow', 'be', 'go', 'payit', 'must', 'aboutmid', 'december', 'right', 'nowand', 'think', 'be', 'real', 'surehow', 'old', 'feeli', 'lose', 'thoughtsin', 'dreamoh', 'mother', 'dontknow', 'howi', 'get', 'ambut', 'ill', 'try', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'suck', 'ininto', 'itwithout', 'ever', 's', 'happeningand', 'downward', 'spiral', 'beginsanyway', 'back', 'startedit', 'start', 'dopewhy', 'tell', 'myselfi', 'do', 'not', 'even', 'like', 'muchbut', 'people', 'itthen', 'try', 'speedwhy', 'tell', 'myselfthe', 'next', 'thing', 'know', 'couple', 'gram', 'weekthen', 'friend', 'introduce', 'smack', 'chase', 'dragonwhy', 'tell', 'myselfwhy', 'be', 'not', 'use', 'needlethen', 'start', 'say', 'never']]\n"
]
],
[
[
"### Create the Dictionary and Corpus needed for Topic Modeling\nThe two main inputs to the LDA topic model are the dictionary(id2word) and the corpus. Let’s create them.",
"_____no_output_____"
]
],
[
[
"# Create Dictionary\nid2word = corpora.Dictionary(data_lemmatized)\n\n# Create Corpus\ntexts = data_lemmatized\n\n# Term Document Frequency\ncorpus = [id2word.doc2bow(text) for text in texts]\n\n# View\nprint(corpus[:1])",
"[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 3), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 2), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 2), (27, 2), (28, 1), (29, 1), (30, 1), (31, 1), (32, 1), (33, 1), (34, 1), (35, 1), (36, 1), (37, 1), (38, 1), (39, 1), (40, 1), (41, 1), (42, 2), (43, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1), (50, 1), (51, 2), (52, 1), (53, 1), (54, 3), (55, 1), (56, 1), (57, 1), (58, 1), (59, 1), (60, 1), (61, 1), (62, 1), (63, 1), (64, 1), (65, 1), (66, 1), (67, 1), (68, 2), (69, 1), (70, 1), (71, 1), (72, 3), (73, 1), (74, 1), (75, 1), (76, 1), (77, 2), (78, 1), (79, 1)]]\n"
]
],
[
[
"Gensim creates a unique id for each word in the document. The produced corpus shown above is a mapping of (word_id, word_frequency).\n\nFor example, (0, 1) above implies, word id 0 occurs once in the first document. Likewise, word id 1 occurs twice and so on.\n\nThis is used as the input by the LDA model.\n\nIf you want to see what word a given id corresponds to, pass the id as a key to the dictionary.",
"_____no_output_____"
]
],
[
[
"id2word[10]",
"_____no_output_____"
],
[
"# Human readable format of corpus (term-frequency)\n[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]",
"_____no_output_____"
]
],
[
[
"### Building the Topic Model\nWe have everything required to train the LDA model. In addition to the corpus and dictionary, you need to provide the number of topics as well.\n\nApart from that, alpha and eta are hyperparameters that affect sparsity of the topics. According to the Gensim docs, both defaults to 1.0/num_topics prior.\n\nchunksize is the number of documents to be used in each training chunk. update_every determines how often the model parameters should be updated and passes is the total number of training passes.",
"_____no_output_____"
]
],
[
[
"# Build LDA model\nlda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=20, \n random_state=100,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='auto',\n per_word_topics=True)",
"_____no_output_____"
],
[
"# Print the Keyword in the 10 topics\npprint(lda_model.print_topics())\ndoc_lda = lda_model[corpus]",
"[(0,\n '0.003*\"want\" + 0.003*\"que\" + 0.002*\"quiera\" + 0.002*\"yo\" + 0.002*\"famous\" + '\n '0.002*\"rocknroll\" + 0.002*\"dollar\" + 0.002*\"llegarelo\" + 0.002*\"record\" + '\n '0.002*\"be\"'),\n (1,\n '0.004*\"love\" + 0.003*\"que\" + 0.003*\"fall\" + 0.003*\"quiera\" + 0.003*\"not\" + '\n '0.002*\"fallin\" + 0.002*\"yo\" + 0.002*\"know\" + 0.002*\"there\" + 0.002*\"s\"'),\n (2,\n '0.079*\"want\" + 0.040*\"rocknroll\" + 0.040*\"famous\" + 0.026*\"be\" + '\n '0.026*\"record\" + 0.026*\"dollar\" + 0.020*\"not\" + 0.020*\"tell\" + '\n '0.020*\"harley\" + 0.020*\"place\"'),\n (3,\n '0.039*\"go\" + 0.034*\"come\" + 0.028*\"never\" + 0.028*\"be\" + 0.023*\"not\" + '\n '0.023*\"away\" + 0.023*\"say\" + 0.023*\"stand\" + 0.023*\"heart\" + 0.023*\"tear\"'),\n (4,\n '0.003*\"get\" + 0.003*\"be\" + 0.002*\"want\" + 0.002*\"not\" + 0.002*\"rocknroll\" + '\n '0.002*\"tell\" + 0.002*\"famous\" + 0.002*\"home\" + 0.002*\"lulus\" + '\n '0.002*\"back\"'),\n (5,\n '0.029*\"sit\" + 0.029*\"something\" + 0.022*\"back\" + 0.021*\"metricyou\" + '\n '0.021*\"bad\" + 0.021*\"careful\" + 0.021*\"street\" + 0.021*\"run\" + '\n '0.021*\"electriclay\" + 0.021*\"combatmy\"'),\n (6,\n '0.002*\"not\" + 0.002*\"love\" + 0.002*\"hate\" + 0.002*\"never\" + 0.002*\"youi\" + '\n '0.002*\"miss\" + 0.002*\"want\" + 0.002*\"watch\" + 0.002*\"do\" + 0.002*\"herand\"'),\n (7,\n '0.002*\"go\" + 0.002*\"come\" + 0.002*\"be\" + 0.002*\"never\" + 0.002*\"not\" + '\n '0.002*\"say\" + 0.002*\"away\" + 0.002*\"stand\" + 0.002*\"heart\" + 0.002*\"tear\"'),\n (8,\n '0.003*\"angel\" + 0.003*\"eyesangel\" + 0.002*\"eye\" + 0.002*\"love\" + '\n '0.002*\"shine\" + 0.002*\"light\" + 0.002*\"meoh\" + 0.002*\"get\" + 0.002*\"not\" + '\n '0.002*\"moon\"'),\n (9,\n '0.002*\"not\" + 0.002*\"miss\" + 0.002*\"want\" + 0.002*\"love\" + 0.002*\"never\" + '\n '0.002*\"hate\" + 0.002*\"youi\" + 0.002*\"watch\" + 0.002*\"need\" + 0.002*\"shiti\"'),\n (10,\n '0.111*\"que\" + 0.066*\"quiera\" + 0.061*\"yo\" + 0.030*\"nena\" + 0.030*\"amor\" + '\n '0.030*\"mundo\" + 0.030*\"sin\" + 0.025*\"haresin\" + 0.025*\"fin\" + '\n '0.025*\"puedo\"'),\n (11,\n '0.003*\"want\" + 0.002*\"chair\" + 0.002*\"something\" + 0.002*\"sit\" + '\n '0.002*\"betterpull\" + 0.002*\"mount\" + 0.002*\"doorshould\" + '\n '0.002*\"guttereasier\" + 0.002*\"work\" + 0.002*\"comebackdollar\"'),\n (12,\n '0.057*\"not\" + 0.046*\"hate\" + 0.042*\"love\" + 0.038*\"want\" + 0.031*\"never\" + '\n '0.027*\"youi\" + 0.027*\"miss\" + 0.019*\"do\" + 0.015*\"need\" + 0.015*\"get\"'),\n (13,\n '0.002*\"do\" + 0.002*\"tell\" + 0.002*\"not\" + 0.002*\"get\" + 0.002*\"go\" + '\n '0.002*\"be\" + 0.002*\"try\" + 0.002*\"mother\" + 0.002*\"never\" + 0.002*\"start\"'),\n (14,\n '0.003*\"not\" + 0.003*\"want\" + 0.003*\"love\" + 0.003*\"hate\" + 0.003*\"never\" + '\n '0.002*\"miss\" + 0.002*\"do\" + 0.002*\"youi\" + 0.002*\"need\" + 0.002*\"watch\"'),\n (15,\n '0.002*\"come\" + 0.002*\"go\" + 0.002*\"never\" + 0.002*\"tear\" + 0.002*\"say\" + '\n '0.002*\"heart\" + 0.002*\"be\" + 0.002*\"hold\" + 0.002*\"fear\" + 0.002*\"cry\"'),\n (16,\n '0.100*\"get\" + 0.051*\"back\" + 0.038*\"lulus\" + 0.026*\"not\" + 0.026*\"tell\" + '\n '0.026*\"home\" + 0.013*\"have\" + 0.013*\"good\" + 0.013*\"coquettesmister\" + '\n '0.013*\"tonight\"'),\n (17,\n '0.092*\"angel\" + 0.084*\"kiss\" + 0.054*\"eye\" + 0.039*\"not\" + '\n '0.038*\"eyesangel\" + 0.031*\"know\" + 0.023*\"uhhuhi\" + 0.023*\"yanever\" + '\n '0.023*\"miss\" + 0.023*\"get\"'),\n (18,\n '0.142*\"love\" + 0.066*\"fall\" + 0.044*\"fallin\" + 0.033*\"s\" 
+ 0.033*\"know\" + '\n '0.033*\"againid\" + 0.033*\"there\" + 0.022*\"be\" + 0.022*\"inchorusfallin\" + '\n '0.022*\"go\"'),\n (19,\n '0.026*\"not\" + 0.026*\"be\" + 0.026*\"tell\" + 0.017*\"never\" + 0.017*\"go\" + '\n '0.017*\"get\" + 0.017*\"do\" + 0.017*\"start\" + 0.017*\"mother\" + 0.017*\"try\"')]\n"
],
[
"# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)",
"\nPerplexity: -5.737356491247813\n\nCoherence Score: 0.7243141275215045\n"
],
[
"# Visualize the topics\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis",
"/Users/neha/anaconda3/envs/projectlab/lib/python3.6/site-packages/pyLDAvis/_prepare.py:257: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n return pd.concat([default_term_info] + list(topic_dfs))\n"
],
[
"mallet_path = '/Users/neha/Downloads/mallet-2.0.8/bin/mallet' # update this path\nldamallet = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=20, id2word=id2word)",
"_____no_output_____"
],
[
"# Show Topics\npprint(ldamallet.show_topics(formatted=False))\n\n# Compute Coherence Score\ncoherence_model_ldamallet = CoherenceModel(model=ldamallet, texts=data_lemmatized, dictionary=id2word, coherence='c_v')\ncoherence_ldamallet = coherence_model_ldamallet.get_coherence()\nprint('\\nCoherence Score: ', coherence_ldamallet)",
"[(8,\n [('stand', 0.14285714285714285),\n ('betterpull', 0.08571428571428572),\n ('cry', 0.08571428571428572),\n ('youbaby', 0.05714285714285714),\n ('town', 0.02857142857142857),\n ('magazinewhen', 0.02857142857142857),\n ('pageantive', 0.02857142857142857),\n ('saywont', 0.02857142857142857),\n ('make', 0.02857142857142857),\n ('graduallyyou', 0.02857142857142857)]),\n (7,\n [('shiti', 0.1111111111111111),\n ('wallplatinum', 0.07407407407407407),\n ('meoh', 0.07407407407407407),\n ('trust', 0.037037037037037035),\n ('wanna', 0.037037037037037035),\n ('perdere', 0.037037037037037035),\n ('heartyou', 0.037037037037037035),\n ('porqueseh', 0.037037037037037035),\n ('howi', 0.037037037037037035),\n ('soand', 0.037037037037037035)]),\n (6,\n [('back', 0.2608695652173913),\n ('stop', 0.043478260869565216),\n ('friend', 0.043478260869565216),\n ('sonare', 0.043478260869565216),\n ('trace', 0.043478260869565216),\n ('payit', 0.043478260869565216),\n ('ancho', 0.043478260869565216),\n ('dad', 0.043478260869565216),\n ('coquettesmister', 0.043478260869565216),\n ('slick', 0.043478260869565216)]),\n (9,\n [('ill', 0.125),\n ('youdont', 0.09375),\n ('putnobody', 0.09375),\n ('care', 0.03125),\n ('heart', 0.03125),\n ('bite', 0.03125),\n ('herall', 0.03125),\n ('bridge', 0.03125),\n ('tuxedo', 0.03125),\n ('bybut', 0.03125)]),\n (10,\n [('start', 0.08),\n ('amissin', 0.08),\n ('nowand', 0.04),\n ('knowi', 0.04),\n ('hairgotta', 0.04),\n ('happeningand', 0.04),\n ('guess', 0.04),\n ('nightbut', 0.04),\n ('wrongso', 0.04),\n ('drink', 0.04)]),\n (2,\n [('watch', 0.15),\n ('open', 0.1),\n ('life', 0.1),\n ('people', 0.05),\n ('mei', 0.05),\n ('marshall', 0.05),\n ('knowif', 0.05),\n ('sighfor', 0.05),\n ('bell', 0.05),\n ('kissagainst', 0.05)]),\n (5,\n [('mother', 0.09523809523809523),\n ('create', 0.047619047619047616),\n ('youfriend', 0.047619047619047616),\n ('itwithout', 0.047619047619047616),\n ('watch', 0.047619047619047616),\n ('didyou', 0.047619047619047616),\n ('hide', 0.047619047619047616),\n ('time', 0.047619047619047616),\n ('youand', 0.047619047619047616),\n ('wait', 0.047619047619047616)]),\n (17,\n [('sit', 0.07352941176470588),\n ('electriclay', 0.04411764705882353),\n ('careful', 0.04411764705882353),\n ('pence', 0.04411764705882353),\n ('street', 0.04411764705882353),\n ('work', 0.04411764705882353),\n ('mount', 0.04411764705882353),\n ('boot', 0.04411764705882353),\n ('metricyou', 0.04411764705882353),\n ('bad', 0.04411764705882353)]),\n (1,\n [('kiss', 0.3),\n ('yanever', 0.075),\n ('realize', 0.05),\n ('uhhuhi', 0.05),\n ('yanow', 0.05),\n ('startedit', 0.025),\n ('insidei', 0.025),\n ('good', 0.025),\n ('awake', 0.025),\n ('introduce', 0.025)]),\n (13,\n [('rocknroll', 0.08955223880597014),\n ('famous', 0.08955223880597014),\n ('record', 0.05970149253731343),\n ('dollar', 0.05970149253731343),\n ('allyeah', 0.04477611940298507),\n ('beverlywe', 0.04477611940298507),\n ('alla', 0.04477611940298507),\n ('place', 0.04477611940298507),\n ('allwe', 0.04477611940298507),\n ('maserati', 0.04477611940298507)])]\n\nCoherence Score: 0.612603937086269\n"
]
],
[
[
"How to find the optimal number of topics for LDA?\nMy approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.\n\nChoosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.\n\nIf you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.\n\nThe compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.",
"_____no_output_____"
]
],
[
[
"def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n \"\"\"\n Compute c_v coherence for various number of topics\n\n Parameters:\n ----------\n dictionary : Gensim dictionary\n corpus : Gensim corpus\n texts : List of input texts\n limit : Max num of topics\n\n Returns:\n -------\n model_list : List of LDA topic models\n coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n \"\"\"\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word)\n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values",
"_____no_output_____"
],
[
"# Can take a long time to run.\nmodel_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=2, limit=40, step=6)",
"_____no_output_____"
],
[
"# Show graph\nlimit=40; start=2; step=6;\nx = range(start, limit, step)\nplt.plot(x, coherence_values)\nplt.xlabel(\"Num Topics\")\nplt.ylabel(\"Coherence score\")\nplt.legend((\"coherence_values\"), loc='best')\nplt.show()",
"_____no_output_____"
],
[
"# Print the coherence scores\nfor m, cv in zip(x, coherence_values):\n print(\"Num Topics =\", m, \" has Coherence Value of\", round(cv, 4))",
"Num Topics = 2 has Coherence Value of 0.5643\nNum Topics = 8 has Coherence Value of 0.7051\nNum Topics = 14 has Coherence Value of 0.6569\nNum Topics = 20 has Coherence Value of 0.5923\nNum Topics = 26 has Coherence Value of 0.5513\nNum Topics = 32 has Coherence Value of 0.5063\nNum Topics = 38 has Coherence Value of 0.4846\n"
],
[
"# Select the model and print the topics\noptimal_model = model_list[3]\nmodel_topics = optimal_model.show_topics(formatted=False)\npprint(optimal_model.print_topics(num_words=10))",
"[(0,\n '0.111*\"uhhuhi\" + 0.037*\"saywont\" + 0.037*\"twois\" + 0.037*\"harmi\" + '\n '0.037*\"startwentyseven\" + 0.037*\"vestcause\" + 0.037*\"namei\" + '\n '0.037*\"wonderful\" + 0.037*\"make\" + 0.037*\"blow\"'),\n (1,\n '0.085*\"rocknroll\" + 0.085*\"famous\" + 0.056*\"dollar\" + 0.056*\"record\" + '\n '0.042*\"harley\" + 0.042*\"beverlywe\" + 0.042*\"place\" + 0.042*\"maserati\" + '\n '0.042*\"twentyfive\" + 0.042*\"golden\"'),\n (2,\n '0.080*\"mother\" + 0.040*\"nowand\" + 0.040*\"tape\" + 0.040*\"pride\" + '\n '0.040*\"graduallyyou\" + 0.040*\"tired\" + 0.040*\"ireeh\" + 0.040*\"whyit\" + '\n '0.040*\"sit\" + 0.040*\"feelfeel\"'),\n (3,\n '0.128*\"heart\" + 0.106*\"stand\" + 0.085*\"lonely\" + 0.064*\"trueim\" + '\n '0.064*\"youbut\" + 0.064*\"cry\" + 0.043*\"amissin\" + 0.043*\"againlove\" + '\n '0.021*\"leave\" + 0.021*\"knowi\"'),\n (4,\n '0.118*\"home\" + 0.118*\"tear\" + 0.088*\"hard\" + 0.088*\"fear\" + 0.088*\"sittin\" '\n '+ 0.059*\"youbaby\" + 0.029*\"wall\" + 0.029*\"busca\" + 0.029*\"insurance\" + '\n '0.029*\"makecause\"'),\n (5,\n '0.121*\"sit\" + 0.091*\"betterpull\" + 0.091*\"run\" + 0.091*\"cubic\" + '\n '0.091*\"metricyou\" + 0.061*\"homerefrain\" + 0.030*\"page\" + 0.030*\"insidei\" + '\n '0.030*\"prostitutewith\" + 0.030*\"myselfwhy\"'),\n (6,\n '0.104*\"quiera\" + 0.096*\"yo\" + 0.048*\"amor\" + 0.048*\"nena\" + 0.048*\"sin\" + '\n '0.048*\"mundo\" + 0.040*\"fin\" + 0.040*\"llegarelo\" + 0.040*\"puedo\" + '\n '0.040*\"pregunta\"'),\n (7,\n '0.188*\"hate\" + 0.141*\"miss\" + 0.125*\"youi\" + 0.047*\"herand\" + '\n '0.047*\"youyou\" + 0.047*\"fuck\" + 0.031*\"feeling\" + 0.031*\"watch\" + '\n '0.016*\"type\" + 0.016*\"heri\"'),\n (8,\n '0.429*\"kiss\" + 0.107*\"yanever\" + 0.071*\"yanow\" + 0.071*\"live\" + '\n '0.036*\"amrich\" + 0.036*\"yeahthing\" + 0.036*\"awake\" + 0.036*\"bemmm\" + '\n '0.036*\"otis\" + 0.036*\"limo\"'),\n (9,\n '0.111*\"give\" + 0.083*\"habit\" + 0.083*\"careful\" + 0.056*\"watch\" + '\n '0.056*\"wallplatinum\" + 0.028*\"affairsmanager\" + 0.028*\"happeningand\" + '\n '0.028*\"payit\" + 0.028*\"itfeel\" + 0.028*\"besosmas\"'),\n (10,\n '0.111*\"hold\" + 0.074*\"night\" + 0.074*\"everytime\" + 0.074*\"life\" + '\n '0.037*\"marshall\" + 0.037*\"mtvto\" + 0.037*\"guess\" + 0.037*\"video\" + '\n '0.037*\"thisjust\" + 0.037*\"songi\"'),\n (11,\n '0.152*\"feel\" + 0.121*\"ill\" + 0.091*\"shiti\" + 0.061*\"people\" + '\n '0.030*\"jumpercable\" + 0.030*\"friend\" + 0.030*\"real\" + 0.030*\"stop\" + '\n '0.030*\"bell\" + 0.030*\"needlethen\"'),\n (12,\n '0.111*\"doorshould\" + 0.074*\"start\" + 0.074*\"turn\" + 0.037*\"music\" + '\n '0.037*\"kill\" + 0.037*\"break\" + 0.037*\"head\" + 0.037*\"chronically\" + '\n '0.037*\"ancho\" + 0.037*\"para\"'),\n (13,\n '0.105*\"meand\" + 0.053*\"mailman\" + 0.053*\"backstagepassno\" + '\n '0.053*\"yourealize\" + 0.053*\"thing\" + 0.053*\"youif\" + 0.053*\"town\" + '\n '0.053*\"work\" + 0.053*\"nenael\" + 0.053*\"feeli\"'),\n (14,\n '0.073*\"chair\" + 0.024*\"moveit\" + 0.024*\"pageantlately\" + 0.024*\"music\" + '\n '0.024*\"introduce\" + 0.024*\"monstershould\" + 0.024*\"smack\" + 0.024*\"press\" + '\n '0.024*\"machineyou\" + 0.024*\"participant\"'),\n (15,\n '0.034*\"howi\" + 0.034*\"bybut\" + 0.034*\"speedwhy\" + 0.034*\"girl\" + '\n '0.034*\"sense\" + 0.034*\"beenyou\" + 0.034*\"hershe\" + 0.034*\"bite\" + '\n '0.034*\"elevemi\" + 0.034*\"burn\"'),\n (16,\n '0.152*\"fall\" + 0.121*\"fallin\" + 0.091*\"back\" + 0.061*\"call\" + 0.061*\"lulus\" '\n '+ 0.061*\"change\" + 0.030*\"awayout\" + 0.030*\"hope\" + 
0.030*\"imstill\" + '\n '0.030*\"missin\"'),\n (17,\n '0.119*\"back\" + 0.071*\"combatmy\" + 0.071*\"electriclay\" + '\n '0.071*\"guttereasier\" + 0.071*\"embrace\" + 0.071*\"mount\" + 0.071*\"street\" + '\n '0.071*\"comebackdollar\" + 0.071*\"boot\" + 0.048*\"bad\"'),\n (18,\n '0.244*\"angel\" + 0.200*\"eye\" + 0.111*\"eyesangel\" + 0.067*\"shine\" + '\n '0.044*\"meoh\" + 0.044*\"light\" + 0.044*\"close\" + 0.022*\"doubtim\" + '\n '0.022*\"meangel\" + 0.022*\"deceive\"'),\n (19,\n '0.491*\"love\" + 0.057*\"againid\" + 0.057*\"youdont\" + 0.038*\"inchorusfallin\" + '\n '0.019*\"kinda\" + 0.019*\"tonight\" + 0.019*\"towngotta\" + 0.019*\"betterbut\" + '\n '0.019*\"hairblew\" + 0.019*\"gentle\"')]\n"
],
[
"def format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data):\n # Init output\n sent_topics_df = pd.DataFrame()\n\n # Get main topic in each document\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n # Get the Dominant topic, Perc Contribution and Keywords for each document\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0: # => dominant topic\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \", \".join([word for word, prop in wp])\n sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\n\n # Add original text to the end of the output\n contents = pd.Series(texts)\n sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\n return(sent_topics_df)\n\n\ndf_topic_sents_keywords = format_topics_sentences(ldamodel=optimal_model, corpus=corpus, texts=data)\n\n# Format\ndf_dominant_topic = df_topic_sents_keywords.reset_index()\ndf_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n# Show\ndf_dominant_topic.head(10)\n",
"_____no_output_____"
]
],
[
[
" Find the most representative document for each topic\nSometimes just the topic keywords may not be enough to make sense of what a topic is about. So, to help with understanding the topic, you can find the documents a given topic has contributed to the most and infer the topic by reading that document. Whew!!",
"_____no_output_____"
]
],
[
[
"# Group top 5 sentences under each topic\nsent_topics_sorteddf_mallet = pd.DataFrame()\n\nsent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\nfor i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n\n# Reset Index \nsent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)\n\n# Format\nsent_topics_sorteddf_mallet.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n\n# Show\nsent_topics_sorteddf_mallet.head()",
"_____no_output_____"
]
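,
[
"# Hedged follow-up (not in the original notebook): print the full text of the most\n# representative document for topic 0, since the note above suggests interpreting a\n# topic by reading its top document. Assumes sent_topics_sorteddf_mallet from the\n# cell above.\nprint(sent_topics_sorteddf_mallet.loc[0, 'Text'])",
"_____no_output_____"
]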
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"raw",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code"
]
] |
d0d2e3dcbe54c21e11bbd6e07e582fc6bffb5071 | 242,293 | ipynb | Jupyter Notebook | src/python/aapred-drilldown.ipynb | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 8 | 2017-10-01T14:34:25.000Z | 2021-04-27T13:18:00.000Z | src/python/aapred-drilldown.ipynb | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 1 | 2020-01-23T17:17:18.000Z | 2020-01-23T17:17:18.000Z | src/python/aapred-drilldown.ipynb | yotamfr/prot2vec | eaee36f9e3929054b1c324acd053a52d0e7be2bd | [
"MIT"
] | 1 | 2018-05-04T04:54:32.000Z | 2018-05-04T04:54:32.000Z | 647.842246 | 175,826 | 0.9315 | [
[
[
"# Evaluate AminoAcids Prediction",
"_____no_output_____"
]
],
[
[
"%matplotlib inline \nimport pylab\npylab.rcParams['figure.figsize'] = (15.0, 12.0)\n\nimport os\n\nimport sys\n\nimport numpy as np\n\nfrom shutil import copyfile\n\nfrom src.python.aa_predict import *\nimport src.python.aa_predict as AA\n\ncheckpoint_path = \"../../data/trained/aapred_cnn_latest.tar\"\n\n\nemb_dim = 5\nwin_size = 10\n\nmodel = GoodOldCNN(emb_dim, win_size)\n\n\nif os.path.exists(checkpoint_path):\n print(\"=> loading checkpoint '%s'\" % checkpoint_path)\n checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint '%s' (epoch %s)\" %\n (checkpoint_path, checkpoint['epoch'] + 1))\nelse:\n print(\"=> no checkpoint found at '%s'\" % checkpoint_path)\n\n",
"=> loading checkpoint '../../data/trained/aapred_cnn_latest.tar'\n=> loaded checkpoint '../../data/trained/aapred_cnn_latest.tar' (epoch 8)\n"
]
],
[
[
"### Define Evaluation Function(s)",
"_____no_output_____"
]
],
[
[
"\nimport torch\nimport torch.nn as nn\n\nfrom torch.autograd import Variable\n\nfrom pymongo import MongoClient\n",
"_____no_output_____"
]
],
[
[
"### 1 2 3 Predict...",
"_____no_output_____"
]
],
[
[
"\nclass_names = sorted(dictionary.keys(), key=lambda aa: dictionary[aa])\n\n\nclient = MongoClient(\"mongodb://127.0.0.1:27017\")\ndb = client['prot2vec']\n\nglobal collection_test, size_test, verbose\n\nAA.collection_test = db['sprot']\n\nAA.size_test = 100\n\nAA.verbose = True\n\nAA.use_cuda = False\n\nbatch_size = 32\n\nloader = WindowBatchLoader(win_size, batch_size, False)\ny_test, y_pred, _ = predict(model, loader)\n\n# data = []\n# for i, (x, y) in enumerate(loader):\n# data.append((np.random.permutation(x), np.random.permutation(y)))\n\n# y_test, y_pred, _ = predict(model, data)\n",
"100%\n"
]
],
[
[
"### Evaluate",
"_____no_output_____"
]
],
[
[
"import itertools\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n\n# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n# print(cm.shape)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred, labels=list(range(25)))\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n\n# Plot normalized confusion matrix\n# plt.figure()\n# plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n# title='Normalized confusion matrix')\n\nplt.show()\n",
"Confusion matrix, without normalization\n"
]
],
[
[
"### Plot Accuracy",
"_____no_output_____"
]
],
[
[
"###https://matplotlib.org/examples/api/barchart_demo.html\ndef plot_accuracy(title, scores):\n \n N = len(scores)\n acc = list(scores.values())\n\n ind = np.arange(N) # the x locations for the groups\n width = 0.2 # the width of the bars\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, acc, width, color='b')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Scores')\n ax.set_title(title)\n ax.set_xticks(ind)\n ax.set_xticklabels(list(scores.keys()))\n\n ax.legend((rects1,), ('acc',))\n \n autolabel(rects1, ax)\n\n\ndef autolabel(rects, ax):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%.2f' % height,\n ha='center', va='bottom')\n \n \n \nindx = [i for i, row in enumerate(cnf_matrix) if row[i] > 0]\nacc_scores = {reverse_dictionary[i]:cnf_matrix[i, i]/np.sum(row) \n for i, row in enumerate(cnf_matrix) if i in indx}\n\nplot_accuracy(\"AA Prediction Accuracy\", acc_scores)\nplt.show()",
"_____no_output_____"
],
[
"import pandas as pd\n\naa_feat = pd.read_csv('Data/aa_feat.csv')\n\nx = aa_feat[\"Occurrence.in.Bacteria.proteins.....\"][indx]\n\ny = list(acc_scores.values())\n\nlabels = [reverse_dictionary[i] for i in indx]\n\n\ndef plot(x, y, labels, title):\n xy = list(zip(x, y))\n for i, label in enumerate(labels):\n x, y = xy[i]\n plt.scatter(x, y)\n plt.annotate(label,\n xy=(x, y),\n xytext=(5, 2),\n textcoords='offset points',\n ha='right', va='bottom')\n plt.title(title)\n\n \nplot(x, y, labels, \"Prediction acc vs. % Occurrence in Data\")\nm, b = np.polyfit(x, y, 1)\nplt.plot(x, m*x + b, '-')",
"_____no_output_____"
]
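,
[
"# Quick numeric check (our addition, not in the original notebook): the Pearson\n# correlation between per-class occurrence and prediction accuracy, quantifying the\n# trend shown by the fitted line above.\nprint(np.corrcoef(np.asarray(x, dtype=float), np.asarray(y, dtype=float))[0, 1])",
"_____no_output_____"
]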
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0d2e5a53f42c3f8adbd854301d2a33c57ab35a8 | 20,443 | ipynb | Jupyter Notebook | prep_decoder.ipynb | itsnarsi/1WeStar | c3d0caa1079db6900a227ab51694faabd6f0eb2d | [
"MIT"
] | null | null | null | prep_decoder.ipynb | itsnarsi/1WeStar | c3d0caa1079db6900a227ab51694faabd6f0eb2d | [
"MIT"
] | null | null | null | prep_decoder.ipynb | itsnarsi/1WeStar | c3d0caa1079db6900a227ab51694faabd6f0eb2d | [
"MIT"
] | null | null | null | 31.694574 | 122 | 0.468962 | [
[
[
"import os\nimport numpy as np\n\nimport torch\ntorch.manual_seed(29)\nfrom torch import nn\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\ncudnn.benchmark = True\nimport torch.nn.functional as F\n\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nfrom glob import glob\nfrom PIL.PngImagePlugin import PngImageFile, PngInfo\n\nfrom tqdm import tqdm",
"_____no_output_____"
],
[
"def bpg_enc(I, out_f, enc_cmd, w, h):\n# out_f = out_f\n I.save(\"test_en.png\")\n os.system(enc_cmd + ' -m 9 -f 444 -q 29 test_en.png -o ' + '\"'+out_f + '\"')\n if not os.path.exists(out_f): print(out_f)\n os.setxattr(out_f, 'user.h', bytes(str(h), 'utf-8'))\n os.setxattr(out_f, 'user.w', bytes(str(w), 'utf-8'))\n os.remove(\"test_en.png\")\n \ndef bpg_dec(bpg_enc_file, dec_cmd):\n# bpg_enc_file = bpg_enc_file.replace(\" \", \"\\ \")\n os.system(dec_cmd + ' \"' + bpg_enc_file + '\" -o test_de.png')\n h = int(os.getxattr(bpg_enc_file, 'user.h'))\n w = int(os.getxattr(bpg_enc_file, 'user.w'))\n I = Image.open(\"test_de.png\")\n return I, w, h",
"_____no_output_____"
],
[
"class quantclip(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \"\"\"\n @staticmethod\n def forward(self, input, quant):\n \"\"\"\n In the forward pass we receive a Tensor containing the input and return a\n Tensor containing the output. You can cache arbitrary Tensors for use in the\n backward pass using the save_for_backward method.\n \"\"\"\n self.save_for_backward(input)\n c = (input.clamp(min=-1, max =1)+1)/2.0 * quant\n c = 2 * (c.round()/quant) - 1\n return c\n @staticmethod\n def backward(self, grad_output):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \"\"\"\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < -1] = 0\n grad_input[input > 1] = 0\n return grad_input, None\n\nclass QuantCLIP(torch.nn.Module):\n\n def __init__(self, num_bits, dtype = torch.cuda.FloatTensor):\n super(QuantCLIP, self).__init__()\n\n self.quant = 2 ** num_bits - 1\n self.quantclip = quantclip\n\n def forward(self, input):\n return self.quantclip.apply(input, self.quant)\n\ndef getHAARFilters(num_filters):\n LL = np.asarray([[0.5, 0.5], [0.5, 0.5]])\n LH = np.asarray([[-0.5, -0.5], [0.5, 0.5]])\n HL = np.asarray([[-0.5, 0.5], [-0.5, 0.5]])\n HH = np.asarray([[0.5, -0.5], [-0.5, 0.5]])\n\n DWT = np.concatenate((LL[np.newaxis, ...],\n LH[np.newaxis, ...],\n HL[np.newaxis, ...],\n HH[np.newaxis, ...]))[:, np.newaxis, ...]\n DWT = np.float32(DWT)\n DWT = torch.from_numpy(DWT)\n\n return DWT.repeat(num_filters, 1, 1, 1)\n\nclass HaarDWT(torch.nn.Module):\n def __init__(self, in_ch = 1):\n super(HaarDWT, self).__init__()\n\n weights = getHAARFilters(in_ch)\n\n self.conv = nn.Conv2d(in_ch, in_ch * 4, 2, stride=2, bias=False, groups = in_ch)\n self.conv.weight.data = weights\n self.conv.weight.requires_grad = False\n\n def forward(self, input):\n return self.conv(input)\n\nclass HaarIDWT(torch.nn.Module):\n def __init__(self, out_ch = 1):\n super(HaarIDWT, self).__init__()\n\n weights = getHAARFilters(out_ch)\n\n self.conv = nn.ConvTranspose2d(out_ch * 4, out_ch, 2, stride=2, bias=False, groups = out_ch)\n self.conv.weight.data = weights\n self.conv.weight.requires_grad = False\n\n def forward(self, input):\n return self.conv(input)\n",
"_____no_output_____"
],
[
"\"\"\"\nSingle CONV blocks:\n\"\"\"\nclass BLOCK_3x3(nn.Module):\n def __init__(\n self, in_ch, out_ch, ker, stride = 1\n ):\n super(BLOCK_3x3, self).__init__()\n self.feat = nn.Sequential(\n nn.ReflectionPad2d(ker//2),\n nn.Conv2d(in_ch, out_ch, ker, stride = stride, bias = True)\n )\n\n def forward(self, x):\n x = self.feat(x)\n return x\n\n\n\"\"\"\nResidual CONV blocks:\n\"\"\"\nclass RES_3x3_BLOCK1(nn.Module):\n \"\"\"\n Residual Block:\n [INPUT] -> 2*[CONV 3x3] -> [OUTPUT] + [INPUT]\n \"\"\"\n def __init__(\n self, in_ch, out_ch, ker, squeeze = 2, res_scale = 0.25\n ):\n super(RES_3x3_BLOCK1, self).__init__()\n\n self.skip = in_ch == out_ch\n self.rs = res_scale\n self.feat = nn.Sequential(\n nn.BatchNorm2d(in_ch),\n nn.ReLU(inplace=True),\n BLOCK_3x3(in_ch, out_ch//squeeze, ker),\n nn.BatchNorm2d(out_ch//squeeze),\n nn.ReLU(inplace=True),\n BLOCK_3x3(out_ch//squeeze, out_ch, ker),\n )\n\n def forward(self, x):\n out = self.feat(x)\n if self.skip: out = self.rs * out + x\n return out\n\n\"\"\"\nEnocder:\n\"\"\"\nclass Encoder(nn.Module):\n def __init__(\n self,\n ):\n super(Encoder, self).__init__()\n\n self.E = nn.Sequential(\n HaarDWT(3),HaarDWT(12),\n BLOCK_3x3(in_ch = 48, out_ch = 128, ker = 3, stride = 1),\n RES_3x3_BLOCK1(in_ch = 128, out_ch = 128, ker = 3, squeeze = 4, res_scale = 1.0),\n RES_3x3_BLOCK1(in_ch = 128, out_ch = 128, ker = 3, squeeze = 4, res_scale = 1.0),\n nn.Conv2d(128, 3, 1),\n QuantCLIP(8)\n )\n\n def forward(self, x):\n x = self.E(x)\n return x\n\"\"\"\nDeocder:\n\"\"\"\nclass Decoder(nn.Module):\n def __init__(\n self,\n ):\n super(Decoder, self).__init__()\n\n self.D = nn.Sequential(\n BLOCK_3x3(in_ch = 3, out_ch = 256, ker = 3, stride = 1),\n RES_3x3_BLOCK1(in_ch = 256, out_ch = 256, ker = 3, squeeze = 4, res_scale = 1.0),\n RES_3x3_BLOCK1(in_ch = 256, out_ch = 256, ker = 3, squeeze = 4, res_scale = 1.0),\n RES_3x3_BLOCK1(in_ch = 256, out_ch = 256, ker = 3, squeeze = 4, res_scale = 1.0),\n RES_3x3_BLOCK1(in_ch = 256, out_ch = 256, ker = 3, squeeze = 4, res_scale = 1.0),\n nn.Conv2d(256, 48, 1),\n HaarIDWT(12),HaarIDWT(3),\n nn.ReLU(),\n )\n\n self.S = nn.Sequential(nn.ReflectionPad2d(1),\n nn.AvgPool2d(3, stride=1, padding=0))\n \n def forward(self, x):\n x = self.D(x)\n# x = self.S(x)\n return x\n",
"_____no_output_____"
],
[
"de_model = Decoder()\ncheck_point_file = \"/home/cibitaw1/local/1WeStar/weights/submission_weights/decode.pth\"\ncheckpoint = torch.load(check_point_file)\nde_model.load_state_dict(checkpoint, strict = False)\nde_model.cuda()\nprint('.')\n\nen_model = Encoder()\ncheck_point_file = \"/home/cibitaw1/local/1WeStar/weights/submission_weights/encode.pth\"\ncheckpoint = torch.load(check_point_file)\nen_model.load_state_dict(checkpoint, strict = False)\nen_model.cuda()\nprint('.')",
".\n.\n"
],
[
"de_model = Decoder()\ncheck_point_file = \"/media/cibitaw1/DATA/SP2020/compressACT/weights/\"+\\\n\"QuantACTShuffleV6_exp01/checkpoint.pth.tar\"\ncheckpoint = torch.load(check_point_file)\nde_model.load_state_dict(checkpoint['state_dict'], strict = False)\nde_model.cuda()\nprint('.')\n\nen_model = Encoder()\ncheck_point_file = \"/media/cibitaw1/DATA/SP2020/compressACT/weights/\"+\\\n\"QuantACTShuffleV6_exp01/checkpoint.pth.tar\"\ncheckpoint = torch.load(check_point_file)\nen_model.load_state_dict(checkpoint['state_dict'], strict = False)\nen_model.cuda()\nprint('.')\ntorch.save(de_model.state_dict(), \"/home/cibitaw1/local/1WeStar/weights/submission_weights/decode.pth\")\ntorch.save(de_model.state_dict(), \"/home/cibitaw1/local/1WeStar/submission_package/decode.pth\")\ntorch.save(en_model.state_dict(), \"/home/cibitaw1/local/1WeStar/weights/submission_weights/encode.pth\")",
".\n.\n"
],
[
"def compress(I_org, model):\n\n e_ = 512\n c_ = 4\n d_ = e_ // c_\n pad_ = 4\n\n w, h = I_org.size\n\n comp_w_new = np.ceil(w/c_)\n comp_h_new = np.ceil(h/c_)\n\n new_w = int(e_ * np.ceil(w/e_))\n new_h = int(e_ * np.ceil(h/e_))\n\n com_w = new_w // c_\n com_h = new_h // c_\n\n I = np.uint8(I_org).copy()\n I = np.pad(I, ((0, int(new_h - h)),\n (0, int(new_w - w)),\n (0, 0)), mode = \"reflect\")\n I = Image.fromarray(I)\n\n\n I1 = np.float32(I)/255.0\n I1 = np.transpose(I1, [2, 0, 1])\n\n Enout = np.zeros((3, com_h, com_w))\n Enout_w = np.zeros((3, com_h, com_w))\n for i in list(np.arange(0, new_h, e_)):\n for j in list(np.arange(0, new_w, e_)):\n if i == 0:\n x1 = int(i)\n x2 = int((i + e_) + (pad_*2*c_))\n else:\n x1 = int(i - (pad_*c_))\n x2 = int((i + e_) + (pad_*c_))\n\n if j == 0:\n y1 = int(j)\n y2 = int((j + e_) + (pad_*2*c_))\n else:\n y1 = int(j - (pad_*c_))\n y2 = int((j + e_) + (pad_*c_))\n It = torch.from_numpy(np.expand_dims(I1[:, x1:x2, y1:y2], 0))\n Xe = model(It.cuda())\n Xe = (Xe + 1.0)/2.0\n Enout[:, x1//c_:x2//c_, y1//c_:y2//c_] += Xe.data.squeeze().cpu().numpy()\n Enout_w[:, x1//c_:x2//c_, y1//c_:y2//c_] += 1.0\n\n Enout = Enout/Enout_w\n Enout = np.uint8(255 * Enout.transpose([1, 2, 0]))\n\n Enout = Image.fromarray(Enout).crop((0, 0, comp_w_new, comp_h_new))\n\n return Enout\n\n\ndef decompress(EnIn, model, w, h):\n\n e_ = 256\n c_ = 4\n d_ = e_ // c_\n pad_ = 4\n\n# w, h = int(EnIn.text['w']), int(EnIn.text['h'])\n\n comp_w_new = np.ceil(w/c_)\n comp_h_new = np.ceil(h/c_)\n\n new_w = int(e_ * np.ceil(w/e_))\n new_h = int(e_ * np.ceil(h/e_))\n\n com_w = new_w // c_\n com_h = new_h // c_\n\n\n Iout = np.zeros((3,new_h,new_w), dtype = np.float32)\n Iout_w = np.zeros((3,new_h,new_w), dtype = np.float32)\n\n EnIn = np.uint8(EnIn).copy()\n EnIn = np.pad(EnIn, ((0, int(com_h - EnIn.shape[0])),\n (0, int(com_w - EnIn.shape[1])),\n (0, 0)), mode = \"reflect\")\n\n\n EnIn = np.float32(EnIn)/255.0\n EnIn = np.transpose(EnIn, [2, 0, 1])\n for i in list(np.arange(0, com_h, d_)):\n for j in list(np.arange(0, com_w, d_)):\n\n if i == 0:\n x1 = int(i)\n x2 = int((i + d_) + pad_*2)\n else:\n x1 = int(i - pad_)\n x2 = int((i + d_) + pad_)\n\n if j == 0:\n y1 = int(j)\n y2 = int((j + d_) + pad_*2)\n else:\n y1 = int(j - pad_)\n y2 = int((j + d_) + pad_)\n\n It = torch.from_numpy(np.expand_dims(EnIn[:, x1:x2, y1:y2], 0))\n It = It * 2.0 - 1.0\n Xe = model(It.cuda()).data.squeeze().cpu()\n\n Iout[:, x1*c_:x2*c_, y1*c_:y2*c_] += np.clip(Xe.numpy(), 0, 1)\n Iout_w[:, x1*c_:x2*c_, y1*c_:y2*c_] += 1.0\n\n Iout = Iout/Iout_w\n\n Iout = np.uint8(255 * Iout.transpose([1, 2, 0]))\n Iout = Image.fromarray(Iout).crop((0, 0, w, h))\n\n return Iout\n",
"_____no_output_____"
],
[
"img_file = \"/media/cibitaw1/DATA/super_rez/professional_valid/valid/alberto-montalesi-176097.png\"\nI = Image.open(img_file).convert(\"RGB\")",
"_____no_output_____"
],
[
"Enout = compress(I, en_model)",
"_____no_output_____"
],
[
"bpg_enc(Enout, \"test_en.bpg\", \"bpgenc\", I.size[0], I.size[1])",
"_____no_output_____"
],
[
"Enout, w, h = bpg_dec(\"test_en.bpg\", \"bpgdec\")",
"_____no_output_____"
],
[
"Iout = decompress(Enout, de_model, w, h)",
"_____no_output_____"
],
[
"src_fldr = \"/media/cibitaw1/DATA/super_rez/professional_valid/valid\"\nimgs = glob(src_fldr + os.sep + \"*.png\")\nsrc_fldr = \"/media/cibitaw1/DATA/super_rez/mobile_valid/valid\"\nimgs += glob(src_fldr + os.sep + \"*.png\")",
"_____no_output_____"
],
[
"dst_fldr = \"/media/cibitaw1/DATA/super_rez/comp_test/compressed\"",
"_____no_output_____"
],
[
"for img in tqdm(imgs):\n I = Image.open(img).convert(\"RGB\")\n Enout = compress(I, en_model)\n img_name = os.path.join(dst_fldr, img.split(os.sep)[-1]).replace(\".png\", \".bpg\")\n bpg_enc(Enout, img_name, \"bpgenc\", I.size[0], I.size[1])",
"100%|██████████| 102/102 [01:05<00:00, 1.47it/s]\n"
],
[
"for img in tqdm(imgs):\n I = Image.open(img).convert(\"RGB\")\n img_name = os.path.join(dst_fldr, img.split(os.sep)[-1]).replace(\".png\", \".bpg\")\n new_img_name = img_name + '__w_' + str(I.size[0]) + '__h_' + str(I.size[1])\n os.rename(img_name, new_img_name)",
"100%|██████████| 102/102 [00:07<00:00, 13.37it/s]\n"
],
[
"def bpg_dec(bpg_enc_file, dec_cmd):\n# bpg_enc_file = bpg_enc_file.replace(\" \", \"\\ \")\n x = bpg_enc_file.split('__')\n bpg_enc_file = x[0]\n w = int(x[1].replace(\"w_\", \"\"))\n h = int(x[2].replace(\"h_\", \"\"))\n os.system(dec_cmd + ' \"' + bpg_enc_file + '\" -o test_de.png')\n # h = int(os.getxattr(bpg_enc_file, 'user.h'))\n # w = int(os.getxattr(bpg_enc_file, 'user.w'))\n I = Image.open(\"test_de.png\")\n os.remove(\"test_de.png\")\n return I, w, h, bpg_enc_file",
"_____no_output_____"
],
[
"X = bpg_dec(\"/media/cibitaw1/DATA/super_rez/comp_test/images/IMG_20170725_123034.bpg__w_2024__h_1518\", \"bpgdec\")",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"X1.split('__')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d2f89bdb2ffe2aebfa4a0b6a54a7b7bce6532f | 99,741 | ipynb | Jupyter Notebook | docs_src/tutorial.inference.ipynb | ahmedsharf11/fastai | eeddfde65ddb463688bf275265d7691052c12bf1 | [
"Apache-2.0"
] | null | null | null | docs_src/tutorial.inference.ipynb | ahmedsharf11/fastai | eeddfde65ddb463688bf275265d7691052c12bf1 | [
"Apache-2.0"
] | null | null | null | docs_src/tutorial.inference.ipynb | ahmedsharf11/fastai | eeddfde65ddb463688bf275265d7691052c12bf1 | [
"Apache-2.0"
] | null | null | null | 91.004562 | 38,152 | 0.832396 | [
[
[
"# Create a Learner for inference",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.gen_doc.nbdoc import *",
"_____no_output_____"
]
],
[
[
"In this tutorial, we'll see how the same API allows you to create an empty [`DataBunch`](/basic_data.html#DataBunch) for a [`Learner`](/basic_train.html#Learner) at inference time (once you have trained your model) and how to call the `predict` method to get the predictions on a single item.",
"_____no_output_____"
]
],
[
[
"jekyll_note(\"\"\"As usual, this page is generated from a notebook that you can find in the docs_srs folder of the\n[fastai repo](https://github.com/fastai/fastai). We use the saved models from [this tutorial](/tutorial.data.html) to\nhave this notebook run fast.\n\"\"\")",
"_____no_output_____"
]
],
[
[
"## Vision",
"_____no_output_____"
],
[
"To quickly get acces to all the vision functions inside fastai, we use the usual import statements.",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.vision import *",
"_____no_output_____"
]
],
[
[
"### A classification problem",
"_____no_output_____"
],
[
"Let's begin with our sample of the MNIST dataset.",
"_____no_output_____"
]
],
[
[
"mnist = untar_data(URLs.MNIST_TINY)\ntfms = get_transforms(do_flip=False)",
"_____no_output_____"
]
],
[
[
"It's set up with an imagenet structure so we use it to split our training and validation set, then labelling.",
"_____no_output_____"
]
],
[
[
"data = (ImageItemList.from_folder(mnist)\n .split_by_folder() \n .label_from_folder()\n .transform(tfms, size=32)\n .databunch()\n .normalize(imagenet_stats)) ",
"_____no_output_____"
]
],
[
[
"Now that our data has been properly set up, we can train a model. Once the time comes to deploy it for inference, we'll need to save the information this [`DataBunch`](/basic_data.html#DataBunch) contains (classes for instance), to do this, we call `data.export()`. This will create an 'export.pkl' file that you'll need to copy with your model file if you want do deploy pn another device.",
"_____no_output_____"
]
],
[
[
"data.export()",
"_____no_output_____"
]
],
[
[
"To create the [`DataBunch`](/basic_data.html#DataBunch) for inference, you'll need to use the `load_empty` method. Note that for now, transforms and normalization aren't saved inside the export file. This is going to be integrated in a future version of the library. For now, we pass the transforms we applied on the validation set, along with all relevant kwargs, and we normalize with the same statistics as during training.\n\nThen, we use it to create a [`Learner`](/basic_train.html#Learner) and load the model we trained before.",
"_____no_output_____"
]
],
[
[
"empty_data = ImageDataBunch.load_empty(mnist, tfms=tfms[1],size=32).normalize(imagenet_stats)\nlearn = create_cnn(empty_data, models.resnet18)\nlearn.load('mini_train');",
"_____no_output_____"
]
],
[
[
"You can now get the predictions on any image via `learn.predict`.",
"_____no_output_____"
]
],
[
[
"img = data.train_ds[0][0]\nlearn.predict(img)",
"_____no_output_____"
]
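,
[
"# A minimal sketch (our addition, not part of the original tutorial): unpack the\n# tuple returned by learn.predict. The variable names here are our own choice.\npred_class, pred_idx, probs = learn.predict(img)\npred_class, pred_idx, probs",
"_____no_output_____"
]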
],
[
[
"It returns a tuple of three things: the object predicted (with the class in this instance), the underlying data (here the corresponding index) and the raw probabilities.",
"_____no_output_____"
],
[
"### A multilabel problem",
"_____no_output_____"
],
[
"Now let's try these on the planet dataset, which is a little bit different in the sense that each image can have multiple tags (and not jsut one label).",
"_____no_output_____"
]
],
[
[
"planet = untar_data(URLs.PLANET_TINY)\nplanet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)",
"_____no_output_____"
]
],
[
[
"Here each images is labelled in a file named 'labels.csv'. We have to add 'train' as a prefix to the filenames, '.jpg' as a suffix and he labels are separated by spaces.",
"_____no_output_____"
]
],
[
[
"data = (ImageItemList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')\n .random_split_by_pct()\n .label_from_df(sep=' ')\n .transform(planet_tfms, size=128)\n .databunch()\n .normalize(imagenet_stats))",
"_____no_output_____"
]
],
[
[
"Again, we call `data.export()` to export our data object properties.",
"_____no_output_____"
]
],
[
[
"data.export()",
"_____no_output_____"
]
],
[
[
"We can then create the [`DataBunch`](/basic_data.html#DataBunch) for inference, by using the `load_empty` method as before.",
"_____no_output_____"
]
],
[
[
"empty_data = ImageDataBunch.load_empty(planet, tfms=tfms[1],size=32).normalize(imagenet_stats)\nlearn = create_cnn(empty_data, models.resnet18)\nlearn.load('mini_train');",
"_____no_output_____"
]
],
[
[
"And we get the predictions on any image via `learn.predict`.",
"_____no_output_____"
]
],
[
[
"img = data.train_ds[0][0]\nlearn.predict(img)",
"_____no_output_____"
]
],
[
[
"Here we can specify a particular theshold to consider the predictions are a hit or not. The default is 0.5 but we can change it.",
"_____no_output_____"
]
],
[
[
"learn.predict(img, thresh=0.3)",
"_____no_output_____"
]
],
[
[
"### A regression example",
"_____no_output_____"
],
[
"For the next example, we are going to use the [BIWI head pose](https://data.vision.ee.ethz.ch/cvl/gfanelli/head_pose/head_forest.html#db) dataset. On pictures of persons, we have to find the center of their face. For the fastai docs, we have built a small subsample of the dataset (200 images) and prepared a dictionary for the correspondance fielname to center.",
"_____no_output_____"
]
],
[
[
"biwi = untar_data(URLs.BIWI_SAMPLE)\nfn2ctr = pickle.load(open(biwi/'centers.pkl', 'rb'))",
"_____no_output_____"
]
],
[
[
"To grab our data, we use this dictionary to label our items. We also use the [`PointsItemList`](/vision.data.html#PointsItemList) class to have the targets be of type [`ImagePoints`](/vision.image.html#ImagePoints) (which will make sure the data augmentation is properly applied to them). When calling [`transform`](/tabular.transform.html#tabular.transform) we make sure to set `tfm_y=True`.",
"_____no_output_____"
]
],
[
[
"data = (ImageItemList.from_folder(biwi)\n .random_split_by_pct()\n .label_from_func(lambda o:fn2ctr[o.name], label_cls=PointsItemList)\n .transform(get_transforms(), tfm_y=True, size=(120,160))\n .databunch()\n .normalize(imagenet_stats))",
"_____no_output_____"
]
],
[
[
"As before, the road to inference is pretty straightforward: export the data, then load an empty [`DataBunch`](/basic_data.html#DataBunch).",
"_____no_output_____"
]
],
[
[
"data.export()",
"_____no_output_____"
],
[
"empty_data = ImageDataBunch.load_empty(biwi, tfms=get_transforms()[1], tfm_y=True, size=(120,60)).normalize(imagenet_stats)\nlearn = create_cnn(empty_data, models.resnet18)\nlearn.load('mini_train');",
"_____no_output_____"
]
],
[
[
"And now we can a prediction on an image.",
"_____no_output_____"
]
],
[
[
"img = data.train_ds[0][0]\nlearn.predict(img)",
"_____no_output_____"
]
],
[
[
"To visualize the predictions, we can use the [`Image.show`](/vision.image.html#Image.show) method.",
"_____no_output_____"
]
],
[
[
"img.show(y=learn.predict(img)[0])",
"_____no_output_____"
]
],
[
[
"### A segmentation example",
"_____no_output_____"
],
[
"Now we are going to look at the [camvid dataset](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/) (at least a small sample of it), where we have to predict the class of each pixel in an image. Each image in the 'images' subfolder as an equivalent in 'labels' that is its segmentations mask.",
"_____no_output_____"
]
],
[
[
"camvid = untar_data(URLs.CAMVID_TINY)\npath_lbl = camvid/'labels'\npath_img = camvid/'images'",
"_____no_output_____"
]
],
[
[
"We read the classes in 'codes.txt' and the function maps each image filename with its corresponding mask filename.",
"_____no_output_____"
]
],
[
[
"codes = np.loadtxt(camvid/'codes.txt', dtype=str)\nget_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'",
"_____no_output_____"
]
],
[
[
"The data block API allows us to uickly get everything in a [`DataBunch`](/basic_data.html#DataBunch) and then we can have a look with `show_batch`.",
"_____no_output_____"
]
],
[
[
"data = (SegmentationItemList.from_folder(path_img)\n .random_split_by_pct()\n .label_from_func(get_y_fn, classes=codes)\n .transform(get_transforms(), tfm_y=True, size=128)\n .databunch(bs=16, path=camvid)\n .normalize(imagenet_stats))",
"_____no_output_____"
]
],
[
[
"As before, we export the data then create an empty [`DataBunch`](/basic_data.html#DataBunch) that we pass to a [`Learner`](/basic_train.html#Learner).",
"_____no_output_____"
]
],
[
[
"data.export()",
"_____no_output_____"
],
[
"empty_data = ImageDataBunch.load_empty(camvid, tfms=get_transforms()[1], tfm_y=True, size=128).normalize(imagenet_stats)\nlearn = Learner.create_unet(empty_data, models.resnet18)\nlearn.load('mini_train');",
"_____no_output_____"
]
],
[
[
"And now we can a prediction on an image.",
"_____no_output_____"
]
],
[
[
"img = data.train_ds[0][0]\nlearn.predict(img)",
"_____no_output_____"
]
],
[
[
"To visualize the predictions, we can use the [`Image.show`](/vision.image.html#Image.show) method.",
"_____no_output_____"
]
],
[
[
"img.show(y=learn.predict(img)[0])",
"_____no_output_____"
]
],
[
[
"## Text",
"_____no_output_____"
],
[
"Next application is text, so let's start by importing everything we'll need.",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.text import *",
"_____no_output_____"
]
],
[
[
"### Language modelling",
"_____no_output_____"
],
[
"First let's look a how to get a language model ready for inference. Since we'll load the model trained in the [visualize data tutorial](/tutorial.data.html), we load the vocabulary used there.",
"_____no_output_____"
]
],
[
[
"imdb = untar_data(URLs.IMDB_SAMPLE)",
"_____no_output_____"
],
[
"vocab = Vocab(pickle.load(open(imdb/'tmp'/'itos.pkl', 'rb')))\ndata_lm = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)\n .random_split_by_pct()\n .label_for_lm()\n .databunch())",
"_____no_output_____"
]
],
[
[
"Like in vision, we just have to type `data_lm.export()` to save all the information inside the [`DataBunch`](/basic_data.html#DataBunch) we'll need. In this case, this includes all the vocabulary we created.",
"_____no_output_____"
]
],
[
[
"data_lm.export()",
"_____no_output_____"
]
],
[
[
"Now let's define a language model learner from an empty data object.",
"_____no_output_____"
]
],
[
[
"empty_data = TextLMDataBunch.load_empty(imdb)\nlearn = language_model_learner(empty_data)\nlearn.load('mini_train_lm');",
"_____no_output_____"
]
],
[
[
"Then we can predict with the usual method, here we can specify how many words we want the model to predict.",
"_____no_output_____"
]
],
[
[
"learn.predict('This is a simple test of', n_words=20)",
"Total time: 00:01\n\n"
]
],
[
[
"### Classification",
"_____no_output_____"
],
[
"Now let's see a classification example. We have to use the same vocabulary as for the language model if we want to be able to use the encoder we saved.",
"_____no_output_____"
]
],
[
[
"data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)\n .split_from_df(col='is_valid')\n .label_from_df(cols='label')\n .databunch(bs=42))",
"_____no_output_____"
]
],
[
[
"Again we export the data.",
"_____no_output_____"
]
],
[
[
"data_clas.export()",
"_____no_output_____"
]
],
[
[
"Now let's define a text classifier from an empty data object.",
"_____no_output_____"
]
],
[
[
"empty_data = TextClasDataBunch.load_empty(imdb)\nlearn = text_classifier_learner(empty_data)\nlearn.load('mini_train_clas');",
"_____no_output_____"
]
],
[
[
"Then we can predict with the usual method.",
"_____no_output_____"
]
],
[
[
"learn.predict('I really loved that movie!')",
"_____no_output_____"
]
],
[
[
"# Tabular",
"_____no_output_____"
],
[
"Last application brings us to tabular data. First let's import everything we'll need.",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.tabular import *",
"_____no_output_____"
]
],
[
[
"We'll use a sample of the [adult dataset](https://archive.ics.uci.edu/ml/datasets/adult) here. Once we read the csv file, we'll need to specify the dependant variable, the categorical variables, the continuous variables and the processors we want to use.",
"_____no_output_____"
]
],
[
[
"adult = untar_data(URLs.ADULT_SAMPLE)\ndf = pd.read_csv(adult/'adult.csv')\ndep_var = '>=50k'\ncat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']\ncont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain']\nprocs = [FillMissing, Categorify, Normalize]",
"_____no_output_____"
]
],
[
[
"Then we can use the data block API to grab everything together before using `data.show_batch()`",
"_____no_output_____"
]
],
[
[
"data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs)\n .split_by_idx(valid_idx=range(800,1000))\n .label_from_df(cols=dep_var)\n .databunch())",
"_____no_output_____"
]
],
[
[
"We define a [`Learner`](/basic_train.html#Learner) object that we fit and then save the model.",
"_____no_output_____"
]
],
[
[
"learn = tabular_learner(data, layers=[200,100], metrics=accuracy)\nlearn.fit(1, 1e-2)\nlearn.save('mini_train')",
"Total time: 00:04\nepoch train_loss valid_loss accuracy\n1 0.328005 0.354749 0.820000 (00:04)\n\n"
]
],
[
[
"As in the other applications, we just have to type `data.export()` to save everything we'll need for inference (here the inner state of each processor).",
"_____no_output_____"
]
],
[
[
"data.export()",
"_____no_output_____"
]
],
[
[
"Then we create an empty data object and a learner from it like before.",
"_____no_output_____"
]
],
[
[
"data = TabularDataBunch.load_empty(adult)\nlearn = tabular_learner(data, layers=[200,100])\nlearn.load('mini_train');",
"_____no_output_____"
]
],
[
[
"And we can predict on a row of dataframe that has the right `cat_names` and `cont_names`.",
"_____no_output_____"
]
],
[
[
"learn.predict(df.iloc[0])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d2fdd4d1f199b3c3396db109456e8ad640a658 | 36,471 | ipynb | Jupyter Notebook | example2/example2.ipynb | alexw16/sharenet | 122ce05e1adb05bce8e8f5f5c3aeceae2e56b303 | [
"MIT"
] | null | null | null | example2/example2.ipynb | alexw16/sharenet | 122ce05e1adb05bce8e8f5f5c3aeceae2e56b303 | [
"MIT"
] | null | null | null | example2/example2.ipynb | alexw16/sharenet | 122ce05e1adb05bce8e8f5f5c3aeceae2e56b303 | [
"MIT"
] | null | null | null | 136.085821 | 25,592 | 0.85424 | [
[
[
"# Example 2: mouse blood lineage networks\nA pipeline providing an example of ShareNet's usage on a mouse blood lineage dataset is included in the ```~/sharenet/example2``` subdirectory. Here, we go through the different steps associated with this pipeline.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd",
"_____no_output_____"
],
[
"def convert_method_name(name):\n method_dict = {'genie': 'GENIE3','pidc':'PIDC','corr': 'Pearson','gaussian': 'BVS'}\n for m in method_dict.keys():\n if m in name:\n new_name = method_dict[m]\n return new_name\n\ndef convert_dataset_name(name):\n if 'nonspecific' in name:\n return 'Non-Specific ChIP'\n elif 'specific' in name:\n return 'Specific ChIP'\n elif 'STRING' in name:\n return 'STRING'",
"_____no_output_____"
]
],
[
[
"## Running ShareNet",
"_____no_output_____"
],
[
"A Bash script, named ```run_example2.sh```, is included in ```~/sharenet/example2``` subdirectory. This script runs the below command, which fits the ShareNet model to the set of networks inferred using PIDC in the mouse blood lineage dataset. The input data required to perform this step (i.e. the initial network estimates and the network edge score standard deviation estimates) are provided in the ```~/sharenet/example2/data``` subdirectory.\n\n```\npython -u \"${script_dir}/sharenet_example2.py\" -d $data_dir -r $results_dir -f \"pidc.edges.txt.gz\" -sf \"pidc.edges.txt.gz\" -K 24 -nc 10 -tol 0.01\n```\n\nA description of the various flags used in this command are as follows. \n- ```-d```: data directory (path to the directory that includes the initial network estimates and standard deviation estimates) \n- ```-r```: results directory (path to the directory where the revised network edge scores and other variational parameters are to be written)\n- ```-f```: file name for the initial network estimates (suffix of the file names for the initial network estimates; in this example, the file names are in the format \"cluster{cluster_no}.pidc.edges.txt.gz\") \n- ```-sf```: file name for the standard deviation estimates (suffix of the file names for the standard deviation estimates; in this example, the file names are in the format \"V.cluster{cluster_no}.pidc.edges.txt.gz\") \n- ```-K```: number of cell types to consider from the dataset (in this example, the mouse blood lineage dataset contains 24 clusters, or cell types) \n- ```-nc```: number of mixture components in the ShareNet model \n- ```-K```: number of cell types to consider from the dataset (in this example, the mouse blood lineage dataset contains 24 clusters, or cell types) \n- ```-tol```: tolerance criterion for convergence ",
"_____no_output_____"
],
[
"## Evaluating Accuracy\nWe also include a Bash script to calculate the accuracy of the baseline PIDC networks and the networks inferred using ShareNet applied to the initial PIDC networks. The script writes the accuracy results to a separate subdirectory ```~/sharenet/example2/accuracy``` using the set of reference networks that can be found in ```~/sharenet/example2/reference```. \n\nHere is an example of one command in this script. \n```python -u \"${script_dir}/sharenet_accuracy.py\" -d $base_dir -r $results_dir -K 24 -f $file_name -rn \"STRING\"```\n\nThe various flags used in this command are as follows. \n- ```-d```: base data directory (path to the base directory that includes the ```/reference/``` subdirectory and where the ```/accuracy/``` subdirectory will be written) \n- ```-r```: results directory (path to the directory where the revised network edge scores and other variational parameters are to be written)\n- ```-K```: number of cell types to consider from the dataset (in this example, the mouse blood lineage dataset contains 24 clusters, or cell types) \n- ```-f```: file name for the initial network estimates (suffix of the file names for the initial network estimates; in this example, the file names are in the format \"cluster{cluster_no}.pidc.edges.txt.gz\") \n- ```-rn```: reference network (reference network against which the inferrred networks are to be compared) ",
"_____no_output_____"
],
[
"## Plot Results\n\nAfter running the scripts for ShareNet and calculating the network accuracy results, plots used to compare the accuracy of networks inferred with and without ShareNet can be generated with the code below. \n\n### AUPRC Ratio: With vs. Without ShareNet",
"_____no_output_____"
]
],
[
[
"data_dir = '../sharenet/example2/reference'\nbaseline_df = pd.read_csv(os.path.join(data_dir,'baseline_auprc.csv'))\nbaseline_df.index = baseline_df['ref_network'].values",
"_____no_output_____"
],
[
"from scipy.stats import wilcoxon\n \nresults_dir = '../sharenet/example2/accuracy'\n\nmethod = 'sharenet.nc10'\nmeasure = 'auprc'\n\nbase_method_list = ['pidc.edges']\ndf_list = []\nfor base_method in base_method_list:\n for ref_network in ['nonspecific_chip','STRING','specific_chip']:\n file_name = '{}.{}.csv'.format(ref_network,base_method)\n df = pd.read_csv(os.path.join(results_dir,file_name))\n df['method'] = base_method\n df['ref_network'] = ref_network\n df_list.append(df)\nnoshare_df = pd.concat(df_list)\n\ndf_list = []\nfor base_method in base_method_list:\n for ref_network in ['nonspecific_chip','STRING','specific_chip']:\n file_name = '{}.{}.{}.csv'.format(ref_network,method,base_method)\n df = pd.read_csv(os.path.join(results_dir,file_name))\n df['method'] = base_method\n df['ref_network'] = ref_network\n df_list.append(df)\nshare_df = pd.concat(df_list)\n\nfor base_method in base_method_list:\n data_dict = {'x1': [],'x2': [],'ref_network': [],'cluster_no': []}\n cluster_no_list = sorted(list(set(share_df[share_df['ref_network'] == ref_network]['cluster_no'])))\n for ref_network in ['nonspecific_chip','STRING','specific_chip']:\n for cluster_no in cluster_no_list:\n noshare_cond = (noshare_df['cluster_no'] == cluster_no) & \\\n (noshare_df['ref_network'] == ref_network) & \\\n (noshare_df['method'] == base_method)\n share_cond = (share_df['cluster_no'] == cluster_no) & \\\n (share_df['ref_network'] == ref_network) & \\\n (share_df['method'] == base_method)\n \n noshare_val = noshare_df[noshare_cond][measure].values[0]\n share_val = share_df[share_cond][measure].values[0]\n\n if ref_network in ['nonspecific_chip','STRING']:\n baseline_auprc = baseline_df.loc[ref_network]['auprc']\n else:\n baseline_auprc = baseline_df.loc['{}_specific_chip'.format(cluster_no)]['auprc']\n \n data_dict['x1'].append(noshare_val/baseline_auprc)\n data_dict['x2'].append(share_val/baseline_auprc)\n data_dict['cluster_no'].append(cluster_no)\n data_dict['ref_network'].append(ref_network)\n df = pd.DataFrame(data_dict)\n df['ref_network'] = [convert_dataset_name(m) for m in df['ref_network']]\n \n plt.figure(figsize=(4,4))\n plt.plot(np.linspace(0,50),np.linspace(0,50),c='black',linestyle='--',lw=0.5)\n \n sns.scatterplot(x='x1',y='x2',data=df,hue='ref_network')\n \n min_x = min(df['x1'].min(),df['x2'].min())\n max_x = max(df['x1'].max(),df['x2'].max())\n\n plt.xlim(min_x*0.99,max_x*1.01)\n plt.ylim(min_x*0.99,max_x*1.01)\n plt.xlabel(measure.upper() + ' Ratio\\n (without ShareNet)',fontsize=16)\n plt.ylabel(measure.upper() + ' Ratio\\n (with ShareNet)',fontsize=16)\n plt.title(convert_method_name(base_method.split('.')[0]),fontsize=16)\n \n lg = plt.legend(fontsize=16,bbox_to_anchor=(1,1),markerscale=2)\n lg.remove()\n plt.show()",
"_____no_output_____"
]
],
[
[
"### Wilcoxon Rank-Sum Test ",
"_____no_output_____"
]
],
[
[
"print(wilcoxon(df['x2'],y=df['x1'],alternative='greater'))",
"WilcoxonResult(statistic=1829.0, pvalue=8.570746990858083e-12)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3022da639c88a9a0c57db73c7dc6e6c718a1a | 24,207 | ipynb | Jupyter Notebook | LeonardoZhu_NightSky/LeonardoZhu_Planning Observations.ipynb | pratsingh/NightSky | ef6ba6fae0b57b9449f4fad5cdab938371f8a3ac | [
"MIT"
] | null | null | null | LeonardoZhu_NightSky/LeonardoZhu_Planning Observations.ipynb | pratsingh/NightSky | ef6ba6fae0b57b9449f4fad5cdab938371f8a3ac | [
"MIT"
] | 1 | 2019-04-12T22:58:28.000Z | 2019-04-12T22:58:28.000Z | LeonardoZhu_NightSky/LeonardoZhu_Planning Observations.ipynb | pratsingh/NightSky | ef6ba6fae0b57b9449f4fad5cdab938371f8a3ac | [
"MIT"
] | 25 | 2019-04-05T22:28:56.000Z | 2019-04-15T23:21:40.000Z | 37.588509 | 238 | 0.495477 | [
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport astropy.units as u\nfrom astropy.time import Time\nimport pytz\nimport astropy\nfrom astropy.coordinates import SkyCoord\nfrom astroplan import Observer, FixedTarget, observability_table, Constraint\nfrom astropy.coordinates import get_sun, get_body, get_moon\nfrom astroplan import moon_illumination\nfrom astroplan import *\nfrom astroplan import download_IERS_A\ndownload_IERS_A()",
"Downloading http://maia.usno.navy.mil/ser7/finals2000A.all [Done]\n"
]
],
[
[
"# Setup",
"_____no_output_____"
]
],
[
[
"# Set up the location of observatory\npalomar = Observer.at_site('Palomar')",
"_____no_output_____"
]
],
[
[
"We get the dataset from https://www.cosmos.esa.int/web/hipparcos/sample-tables-1",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('data2.csv')",
"_____no_output_____"
],
[
"#Pick out the objects having names\nmask = list()\nfor n in range(len(data['Name'])):\n if(isinstance(data['Name'][n],str)):\n mask.append(True)\n else:\n mask.append(False)\ndata[mask].to_csv('hipparcos.csv')",
"_____no_output_____"
],
[
"star_table = pd.read_csv('hipparcos.csv')",
"_____no_output_____"
],
[
"#Set the observing window\nobs_dates = list()\nfor i in range(15,31):\n date = \"2019-10-\" + str(i)\n time = Time(date)\n obs_dates.append(time)",
"_____no_output_____"
]
],
[
[
"# Visibility",
"_____no_output_____"
]
],
[
[
"#Complie a list of all defined objects\nstars = list()\nfor s in range(len(star_table['Name'])):\n coords = SkyCoord(star_table['ra (deg)'][s]*u.deg, \n star_table['dec (deg)'][s]*u.deg, \n frame='icrs')\n stars.append(FixedTarget(name=star_table['Name'][s], coord=coords))",
"_____no_output_____"
]
],
[
[
"#Create an observability table",
"_____no_output_____"
]
],
[
[
"time = Time([\"2019-10-15 00:00\", \"2019-10-30 23:59\"])",
"_____no_output_____"
],
[
"#The moon's illumination values are from: https://www.calendar-12.com/moon_calendar/2019/october\n#Use the mean value of the moon's illumination during our observing window\nconstraint = [AltitudeConstraint(30*u.deg, 90*u.deg),AirmassConstraint(2), \n AtNightConstraint.twilight_astronomical(),MoonIlluminationConstraint(0.4),\n MoonSeparationConstraint(min = 10*u.deg)]",
"_____no_output_____"
],
[
"observability_table(observer = palomar,\n constraints = constraint,\n targets = stars,\n time_range = time)",
"_____no_output_____"
],
[
"#Pick ten observable objects for the following steps\ntargets = list()\ntargets.append(stars[4])\ntargets.append(stars[5])\ntargets.append(stars[6])\ntargets.append(stars[7])\ntargets.append(stars[8])\ntargets.append(stars[3])\ntargets.append(stars[38])\ntargets.append(stars[37])\ntargets.append(stars[36])\ntargets.append(stars[29])",
"_____no_output_____"
],
[
"#Check the visibility of the ten objects with the highest value of moon's illumination\nconstraint = [AltitudeConstraint(30*u.deg, 90*u.deg),AirmassConstraint(2), \n AtNightConstraint.twilight_astronomical(),MoonIlluminationConstraint(0.95),\n MoonSeparationConstraint(min = 10*u.deg)]\nobservability_table(observer = palomar,\n constraints = constraint,\n targets = targets,\n time_range = time)",
"_____no_output_____"
],
[
"#Check the visibility of the ten objects with the lowest value of moon's illumination\nconstraint = [AltitudeConstraint(30*u.deg, 90*u.deg),AirmassConstraint(2), \n AtNightConstraint.twilight_astronomical(),MoonIlluminationConstraint(0.01),\n MoonSeparationConstraint(min = 10*u.deg)]\nobservability_table(observer = palomar,\n constraints = constraint,\n targets = targets,\n time_range = time)",
"_____no_output_____"
]
],
[
[
"Therefore, these ten objects can be observed through the whole observing window.",
"_____no_output_____"
],
[
"# Moon Phase",
"_____no_output_____"
],
[
"We found the moon phases from: https://www.calendar-12.com/moon_calendar/2019/october\nAnd based on the moon phases calendar, the moon will be waning gibbous on Oct 15th, 2019 (the start of our observing window) and keep waning until Oct 27th, 2019. Then the moon will be waxing from a new one at the end of our window.",
"_____no_output_____"
]
],
[
[
"#The moon's illumination\nfor i in range(len(obs_dates)):\n moon_ratio = moon_illumination(obs_dates[i])\n print(obs_dates[i])\n print(moon_ratio)\n print(\"\\n\")",
"2019-10-15 00:00:00.000\n0.9865561943920241\n\n\n2019-10-16 00:00:00.000\n0.9562520875685778\n\n\n2019-10-17 00:00:00.000\n0.9074664247952995\n\n\n2019-10-18 00:00:00.000\n0.841225405905627\n\n\n2019-10-19 00:00:00.000\n0.759277143961225\n\n\n2019-10-20 00:00:00.000\n0.6641422433892282\n\n\n2019-10-21 00:00:00.000\n0.5591863674409921\n\n\n2019-10-22 00:00:00.000\n0.4487097215990062\n\n\n2019-10-23 00:00:00.000\n0.33800752114318117\n\n\n2019-10-24 00:00:00.000\n0.23329781313288983\n\n\n2019-10-25 00:00:00.000\n0.14137422531697247\n\n\n2019-10-26 00:00:00.000\n0.06888788130022877\n\n\n2019-10-27 00:00:00.000\n0.021331664431187447\n\n\n2019-10-28 00:00:00.000\n0.0020219486781750673\n\n\n2019-10-29 00:00:00.000\n0.011482916033771717\n\n\n2019-10-30 00:00:00.000\n0.047509435596680194\n\n\n"
]
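,
[
"# Optional visual check (our addition): plot the moon's illumination over the\n# observing window, using the obs_dates list and the moon_illumination function\n# already defined/imported above.\nfracs = [moon_illumination(t) for t in obs_dates]\nplt.plot([t.datetime for t in obs_dates], fracs, marker='o')\nplt.xlabel('Date')\nplt.ylabel('Moon illumination fraction')\nplt.xticks(rotation=45)\nplt.show()",
"_____no_output_____"
]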
],
[
[
"Combined with the moon's illumination, the moon will interfere with our observation somewhat at the beginning of our observing window. And with the moon waning later, the interference would be ignored.",
"_____no_output_____"
],
[
"# Visibility one month later",
"_____no_output_____"
]
],
[
[
"#New time range\ntime2 = Time([\"2019-11-15 00:00\", \"2019-11-30 23:59\"])",
"_____no_output_____"
],
[
"obs_dates2 = list()\nfor i in range(15,31):\n date = \"2019-11-\" + str(i)\n time = Time(date)\n obs_dates2.append(time)",
"_____no_output_____"
],
[
"# The moon's illumination values are from: https://www.calendar-12.com/moon_calendar/2019/november\n#Use the mean value of the moon's illumination during the observing window again\nillum = list()\nfor i in range(len(obs_dates2)):\n moon_ratio = moon_illumination(obs_dates2[i])\n illum.append(moon_ratio)\nnp.mean(illum)",
"_____no_output_____"
],
[
"constraint = [AltitudeConstraint(30*u.deg, 90*u.deg),AirmassConstraint(2), \n AtNightConstraint.twilight_astronomical(),MoonIlluminationConstraint(0.35),\n MoonSeparationConstraint(min = 10*u.deg)]",
"_____no_output_____"
],
[
"observability_table(observer = palomar,\n constraints = constraint,\n targets = targets,\n time_range = time2)",
"_____no_output_____"
]
],
[
[
"Thus, the situation will be worse for the ten objects.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0d302d015dcd82be66477bc1328caadc4d38d8b | 31,798 | ipynb | Jupyter Notebook | tutorial/source/01-intro_part_i(blueprint).ipynb | HeyangGong/Pyro4CI | 28778477ce8ff3bc12b3ac8face2208e5b05ff7c | [
"MIT"
] | null | null | null | tutorial/source/01-intro_part_i(blueprint).ipynb | HeyangGong/Pyro4CI | 28778477ce8ff3bc12b3ac8face2208e5b05ff7c | [
"MIT"
] | null | null | null | tutorial/source/01-intro_part_i(blueprint).ipynb | HeyangGong/Pyro4CI | 28778477ce8ff3bc12b3ac8face2208e5b05ff7c | [
"MIT"
] | null | null | null | 41.949868 | 635 | 0.561859 | [
[
[
"*本文讲解了概率编程的基本模块*\n\n- 随机函数是某个数据生成过程的模型\n- 初等随机函数就是一类可以显式计算样本概率的随机函数",
"_____no_output_____"
],
[
"核心问题:\n- 样本是有名字的,如何获得样本的名字?如何使用样本的名字?",
"_____no_output_____"
],
[
"# An Introduction to Models in Pyro\n\nThe basic unit of probabilistic programs is the _stochastic function_. \nThis is an arbitrary Python callable that combines two ingredients:\n\n- deterministic Python code; and\n- primitive stochastic functions that call a random number generator\n\n\nThroughout the tutorials and documentation, **we will often call stochastic functions models**.",
"_____no_output_____"
],
[
"----\n\n概率编程的基本单元是 stochastic function, 包含 determinstic and primitive stochastic functions that call a random number generator. 也就是说随机函数像是一个具备有 '\\_\\_call\\_\\_' 方法pytorch基本模块. \n\n在这个教程里,我们把随机函数叫做模型是因为随机函数是某个数据生成过程(DGP)的一个实现。 Expressing models as 随机函数意味着模型可以像 Python 可调用对象一样可以组合,复用,引入和序列化。",
"_____no_output_____"
]
],
[
[
"import torch\nimport pyro\n\npyro.set_rng_seed(101)",
"_____no_output_____"
]
],
[
[
"## Primitive Stochastic Functions\n\n原始随机函数是构建模型的基础模块,下面给出正态分布的原始随机函数及其使用。",
"_____no_output_____"
]
],
[
[
"loc = 0. # mean zero\nscale = 1. # unit variance\nnormal = torch.distributions.Normal(loc, scale) # create a normal distribution object\nx = normal.rsample() # draw a sample from N(0,1)\nprint(\"sample\", x)\nprint(\"log prob\", normal.log_prob(x)) # score the sample from N(0,1)",
"sample tensor(-1.3905)\nlog prob tensor(-1.8857)\n"
],
[
"[x for x in dir(normal) if not '_' in x]",
"_____no_output_____"
],
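[
"# Illustrative sketch (not part of the original tutorial): a few more of the\n# common Distribution methods, using the `normal` object defined above.\nprint(normal.cdf(torch.tensor(0.)))   # P(X <= 0) = 0.5 for N(0, 1)\nprint(normal.mean, normal.stddev)     # summary statistics\nprint(normal.sample((3,)))            # draw a batch of three samples",
"_____no_output_____"
]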
],
[
[
"随机函数有几个常见的方法,包括抽样,计算概率等。",
"_____no_output_____"
],
[
"## A Simple Model\n",
"_____no_output_____"
],
[
"所有的 probalistic programs 是通过 primitive functions and deterministic computation 组合得到的。我们最终的目的是要是用 probablistic programming 来模拟真实世界,我们现在从一个具体的例子出发。\n\n现在我们有一堆关于每天平均气温和天气情况的数据。我们想到天气情况和气温的关系。如下的简单随机函数描述了数据的生成过程。 ",
"_____no_output_____"
]
],
[
[
"from graphviz import Source\nSource('digraph{rankdir=LR; cloudy -> temperature}')",
"_____no_output_____"
],
[
"def weather():\n cloudy = torch.distributions.Bernoulli(0.3).sample()\n cloudy = 'cloudy' if cloudy.item() == 1.0 else 'sunny'\n mean_temp = {'cloudy': 55.0, 'sunny': 75.0}[cloudy]\n scale_temp = {'cloudy': 10.0, 'sunny': 15.0}[cloudy]\n temp = torch.distributions.Normal(mean_temp, scale_temp).rsample()\n return cloudy, temp.item()\n\ng = weather()\nprint(g)",
"('cloudy', 46.847618103027344)\n"
]
],
[
[
"However, `weather` is entirely independent of Pyro - it only calls PyTorch. **We need to turn it into a Pyro program if we want to use this model for anything other than sampling fake data.**\n",
"_____no_output_____"
],
[
"这个模型除生成假数据还能干嘛呢?定义观测数据用于变分推断,提取生成过程中间结果?",
"_____no_output_____"
],
[
"## Model with Pyro\n\n\nThe `pyro.sample` Primitive\n\n",
"_____no_output_____"
]
],
[
[
"%psource pyro.sample",
"_____no_output_____"
],
[
"# 到底该样本的名字用在哪里?怎么获取?\nx = pyro.sample(\"my_sample\", pyro.distributions.Normal(loc, scale))\nprint(x)\n",
"tensor(1.2663)\n"
],
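[
"# Illustrative sketch (not part of the original tutorial): Pyro's backend records\n# every named sample site, and poutine.trace lets us look a site up by its name.\nimport pyro.poutine as poutine\n\ndef tiny_model():\n    return pyro.sample(\"my_sample\", pyro.distributions.Normal(loc, scale))\n\ntrace = poutine.trace(tiny_model).get_trace()\nprint(trace.nodes[\"my_sample\"][\"value\"])  # the sample, retrieved by its name",
"_____no_output_____"
],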
[
"torch.distributions.Normal(loc, scale).rsample(), pyro.distributions.Normal(loc, scale).rsample(), \\\ntorch.distributions.Normal(loc, scale), pyro.distributions.Normal(loc, scale)",
"_____no_output_____"
]
],
[
[
"Just like a direct call to `torch.distributions.Normal().rsample()`, this returns a sample from the unit normal distribution. **The crucial difference** is that this sample is _named_. Pyro's backend uses these names to uniquely identify sample statements and _change their behavior at runtime_ depending on how the enclosing stochastic function is being used. As we will see, this is how Pyro can implement the various manipulations that underlie inference algorithms.\n\n---\n\n重要区别是一个有名字,一个没有名字。后段会在抽样声明中使用这个名字。**那么究竟如何使用这个名字呢?** 可能是用 `pyro.param` 。",
"_____no_output_____"
],
[
"Now that we've introduced `pyro.sample` and `pyro.distributions` we can rewrite our simple model as a Pyro program:",
"_____no_output_____"
]
],
[
[
"def weather():\n cloudy = pyro.sample('cloudy', pyro.distributions.Bernoulli(0.3))\n cloudy = 'cloudy' if cloudy.item() == 1.0 else 'sunny'\n mean_temp = {'cloudy': 55.0, 'sunny': 75.0}[cloudy]\n scale_temp = {'cloudy': 10.0, 'sunny': 15.0}[cloudy]\n temp = pyro.sample('temp', pyro.distributions.Normal(mean_temp, scale_temp))\n return cloudy, temp.item()\n\nfor _ in range(3):\n print(weather())",
"('cloudy', 64.5440444946289)\n('sunny', 94.37557983398438)\n('sunny', 72.5186767578125)\n"
]
],
[
[
"Procedurally, `weather()` is still a non-deterministic Python callable that returns two random samples. Because the randomness is now invoked with `pyro.sample`, however, it is much more than that. In particular `weather()` specifies a joint probability distribution over two named random variables: `cloudy` and `temp`. As such, **it defines a probabilistic model that we can reason about using the techniques of probability theory.** For example we might ask: if I observe a temperature of 70 degrees, how likely is it to be cloudy? How to formulate and answer these kinds of questions will be the subject of the next tutorial.",
"_____no_output_____"
],
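[
"# A peek ahead (illustrative sketch, not from the original tutorial): pyro.condition\n# fixes a named sample site to an observed value, the first step toward answering\n# questions like the one posed above.\nconditioned_weather = pyro.condition(weather, data={\"temp\": torch.tensor(70.0)})\nprint(conditioned_weather())  # 'temp' is now clamped to the observation",
"_____no_output_____"
],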
[
"## 一般随机函数\n\n**Universality: Stochastic Recursion, Higher-order Stochastic Functions, and Random Control Flow**\n\nWe've now seen how to define a simple model. Building off of it is easy. For example:",
"_____no_output_____"
]
],
[
[
"from graphviz import Source\nSource('digraph{rankdir=LR; cloudy -> temperature -> ice_cream; cloudy -> ice_cream}')",
"_____no_output_____"
],
[
"def ice_cream_sales():\n cloudy, temp = weather()\n expected_sales = 200. if cloudy == 'sunny' and temp > 80.0 else 50.\n ice_cream = pyro.sample('ice_cream', pyro.distributions.Normal(expected_sales, 10.0))\n return ice_cream\n\nice_cream_sales()",
"_____no_output_____"
]
],
[
[
"**This kind of modularity, familiar to any programmer, is obviously very powerful.** But is it powerful enough to encompass all the different kinds of models we'd like to express?",
"_____no_output_____"
],
[
"---\n\n这种模块化是非常强大的. 下面给出一个随机控制得到几何分布的例子.",
"_____no_output_____"
],
[
"\n几何分布:\n\n一种方程思维是 $T = \\sum_{i=1}^{T-1}I(X_i = 0) + I(X_T = 1)$, 这个似乎没有什么用, 还是从定义出发, 第一次抽样到 1 的次数. 这种 X 与 T 之间的依赖关系非常奇怪. 事实上 $T = ~(X_1, X_2, ...)$, T本质上只与 X 的样本序列有关系, 有一个时间维度. ",
"_____no_output_____"
]
],
[
[
"def geometric(p, t=None):\n if t is None:\n t = 0\n x = pyro.sample(\"x_{}\".format(t), pyro.distributions.Bernoulli(p)) # 这里体现了样本名字的作用!!!\n if x.item() == 1:\n return 0\n else:\n return 1 + geometric(p, t + 1)\n \nprint(geometric(0.5))",
"3\n"
],
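[
"# Sketch (not part of the original tutorial): trace one run to see the\n# dynamically generated site names.\nimport pyro.poutine as poutine\ntrace = poutine.trace(lambda: geometric(0.5)).get_trace()\nprint([name for name in trace.nodes if name.startswith(\"x_\")])  # e.g. ['x_0', 'x_1', ...]",
"_____no_output_____"
]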
],
[
[
"Note that the names `x_0`, `x_1`, etc., in `geometric()` are generated dynamically and that different executions can have different numbers of named random variables. \n\nWe are also free to define stochastic functions that accept as input or produce as output other stochastic functions:",
"_____no_output_____"
]
],
[
[
"from graphviz import Source\nSource('digraph{rankdir=LR; scale, mu_latent -> z1, z2 -> y}')",
"_____no_output_____"
],
[
"def normal_product(loc, scale):\n z1 = pyro.sample(\"z1\", pyro.distributions.Normal(loc, scale))\n z2 = pyro.sample(\"z2\", pyro.distributions.Normal(loc, scale))\n y = z1 * z2\n return y\n\ndef make_normal_normal():\n mu_latent = pyro.sample(\"mu_latent\", pyro.distributions.Normal(0, 1))\n fn = lambda scale: normal_product(mu_latent, scale)\n return fn\n\nprint(make_normal_normal()(1.))",
"tensor(0.7048)\n"
]
],
[
[
"Pyro 是可以构建通复杂随机函数,模拟各种数据生成过程,是一门通用的概率编程语言。",
"_____no_output_____"
],
[
"## Next Steps?\n\n从先验分布到后验分布。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
d0d30e3a7482a60cbeedd01599f30f1bbeae5acc | 31,115 | ipynb | Jupyter Notebook | tutorials/nlp/Entity_Linking_Medical.ipynb | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | tutorials/nlp/Entity_Linking_Medical.ipynb | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | tutorials/nlp/Entity_Linking_Medical.ipynb | vadam5/NeMo | 3c5db09539293c3c19a6bb7437011f91261119af | [
"Apache-2.0"
] | null | null | null | 43.396095 | 1,020 | 0.58528 | [
[
[
"\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n\n# **** These install steps won't work until my fork is added the the NVIDIA repo, ****\n# in the meantime, clone my fork and use ./reinstall\n\n## Install dependencies\n!pip install wget\n!pip install faiss-gpu\n\n## Install NeMo\nBRANCH = 'r1.0.0rc1'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]",
"_____no_output_____"
],
[
"import faiss\nimport torch\nimport wget\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom omegaconf import OmegaConf\nfrom pytorch_lightning import Trainer\nfrom IPython.display import display\nfrom tqdm import tqdm\n\nfrom nemo.collections import nlp as nemo_nlp\nfrom nemo.utils.exp_manager import exp_manager",
"_____no_output_____"
]
],
[
[
"## Entity Linking",
"_____no_output_____"
],
[
"#### Task Description\n[Entity linking](https://en.wikipedia.org/wiki/Entity_linking) is the process of connecting concepts mentioned in natural language to their canonical forms stored in a knowledge base. For example, say a knowledge base contained the entity 'ID3452 influenza' and we wanted to process some natural language containing the sentence \"The patient has flu like symptoms\". An entity linking model would match the word 'flu' to the knowledge base entity 'ID3452 influenza', allowing for disambiguation and normalization of concepts referenced in text. Entity linking applications range from helping automate data ingestion to assisting in real time dialogue concept normalization. \n\nWithin nemo and this tutorial we use the entity linking approach described in the [Self-alignment Pre-training for Biomedical Entity Representations](https://arxiv.org/abs/2010.11784) paper. The main idea behind this approach is to reshape an initial concept embedding space such that synonyms of the same concept are pulled closer together and unrelated concepts are pushed further apart. The concept embeddings from this reshaped space can then be used to build a knowledge base embedding index. This index stores concept IDs mapped to their respective concept embeddings in a format conducive to efficient nearest neighbor search. We can link query concepts to their canonical forms in the knowledge base by performing a nearest neighbor search- matching concept query embeddings to the most similar concepts embeddings in the knowledge base index. \n\nIn this tutorial we will be using the [faiss](https://github.com/facebookresearch/faiss) library to build our concept index.",
"_____no_output_____"
],
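[
"# Minimal illustrative sketch (ours, not from the NeMo tutorial) of the nearest\n# neighbor lookup described above. Assumes `kb_embs` and `query_embs` are 2-D\n# float32 numpy arrays of L2-normalized concept and query embeddings.\nimport faiss\nindex = faiss.IndexFlatIP(kb_embs.shape[1])  # inner product == cosine when normalized\nindex.add(kb_embs)\nscores, ids = index.search(query_embs, 5)    # top-5 candidate concepts per query",
"_____no_output_____"
],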
[
"#### Self Alignment Pretraining\nSelf-Alignment pretraining is a second stage pretraining of an existing encoder (called second stage because the encoder model can be further finetuned after this more general pretraining step). The dataset used during training consists of pairs of concept synonyms that map to the same ID. At each training iteration, we only select *hard* examples present in the mini batch to calculate the loss and update the model weights. In this context, a hard example is an example where a concept is closer to an unrelated concept in the mini batch than it is to the synonym concept it is paired with by some margin. I encourage you to take a look at [section 2 of the paper](https://arxiv.org/pdf/2010.11784.pdf) for a more formal and in depth description of how hard examples are selected.\n\nWe then use a [metric learning loss](https://openaccess.thecvf.com/content_CVPR_2019/papers/Wang_Multi-Similarity_Loss_With_General_Pair_Weighting_for_Deep_Metric_Learning_CVPR_2019_paper.pdf) calculated from the hard examples selected. This loss helps reshape the embedding space. The concept representation space is rearranged to be more suitable for entity matching via embedding cosine similarity. \n\nNow that we have idea of what's going on, let's get started!",
"_____no_output_____"
],
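[
"# Toy illustration (ours, not the NeMo implementation) of the hard-example\n# criterion: an example is \"hard\" when some unrelated concept in the mini batch\n# is closer to the anchor than its paired synonym, up to a margin.\nimport numpy as np\n\ndef is_hard(anchor, synonym, negatives, margin=0.2):\n    cos = lambda a, b: a @ b / (np.linalg.norm(a) * np.linalg.norm(b))\n    return any(cos(anchor, n) > cos(anchor, synonym) - margin for n in negatives)",
"_____no_output_____"
],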
[
"## Dataset Preprocessing",
"_____no_output_____"
]
],
[
[
"# Download data\nDATA_DIR = \"tiny_example_data\"\nwget.download('https://github.com/vadam5/NeMo/blob/main/examples/nlp/entity_linking/data/tiny_example_data.zip?raw=true',\n os.path.join(\"tiny_example_data.zip\"))\n\n!unzip tiny_example_data.zip",
"_____no_output_____"
]
],
[
[
"In this tutorial we will be using a tiny toy dataset to demonstrate how to use NeMo's entity linking model functionality. The dataset includes synonyms for 12 medical concepts. Here's the dataset before preprocessing:",
"_____no_output_____"
]
],
[
[
"raw_data = pd.read_csv(os.path.join(DATA_DIR, \"tiny_example_dev_data.csv\"), names=[\"ID\", \"CONCEPT\"], index_col=False)\nprint(raw_data)",
" ID CONCEPT\n0 1 Head ache\n1 1 Headache\n2 1 Migraine\n3 1 Pain in the head\n4 1 cephalgia\n5 1 cephalalgia\n6 2 heart attack\n7 2 Myocardial infraction\n8 2 necrosis of heart muscle\n9 2 MI\n10 3 CAD\n11 3 Coronary artery disease\n12 3 atherosclerotic heart disease\n13 3 heart disease\n14 3 damage of major heart blood vessels\n15 4 myocardial ischemia\n16 4 cardiac ischemia\n17 4 reduced ability to pump blood\n18 5 gradual loss of kidney function\n19 5 kidneys cannot filter blood\n20 5 chronic kidney disease\n21 5 chronic kidney failure\n22 5 CKD\n23 6 alchohol intoxication\n24 6 acute alchohol intoxication\n25 6 alchohol poisoning\n26 6 severe drunkenness\n27 6 over consumption of alcohol\n28 7 diabetes mellitus\n29 7 diabetes\n30 7 inability to process glucose\n31 7 unable to take up sugar\n32 7 Type 2 diabetes\n33 8 Hyperinsulinemia\n34 8 High blood sugar\n35 8 abnormally high levels of insulin\n36 9 Dipeptidyl peptidase-4 inhibitor\n37 9 dpp-4 inhibitor\n38 9 alogliptin\n39 9 Nesina\n40 9 Vipidia\n41 10 hypoglycemia\n42 10 low blood sugar\n43 11 anticoagulants\n44 11 blood thinners\n45 11 Apixaban\n46 11 Eliquis\n47 12 Ibuprofen\n48 12 Aspirin\n49 12 over the counter nonsteroidal anti-inflammator...\n50 12 NSAID\n"
]
],
[
[
"We've already paired off the concepts for this dataset with the format `ID concept_synonym1 concept_synonym2`. Here are the first ten rows:",
"_____no_output_____"
]
],
[
[
"training_data = pd.read_table(os.path.join(DATA_DIR, \"tiny_example_train_pairs.tsv\"), names=[\"ID\", \"CONCEPT_SYN1\", \"CONCEPT_SYN2\"], delimiter='\\t')\nprint(training_data.head(10))",
" ID CONCEPT_SYN1 CONCEPT_SYN2\n0 1 Pain in the head cephalgia\n1 1 Pain in the head cephalalgia\n2 1 Migraine cephalgia\n3 1 Head ache Pain in the head\n4 1 Head ache Migraine\n5 1 Head ache cephalalgia\n6 1 Headache Migraine\n7 1 Migraine cephalalgia\n8 1 cephalgia cephalalgia\n9 1 Headache Pain in the head\n"
]
],
[
[
"Use the [Unified Medical Language System (UMLS)](https://www.nlm.nih.gov/research/umls/index.html) dataset for full medical domain entity linking training. The data contains over 9 million entities and is a table of medical concepts with their corresponding concept IDs (CUI). After [requesting a free license and making a UMLS Terminology Services (UTS) account](https://www.nlm.nih.gov/research/umls/index.html), the [entire UMLS dataset](https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html) can be downloaded from the NIH's website. If you've cloned the NeMo repo you can run the data processing script located in `examples/nlp/entity_linking/data/umls_dataset_processing.py` on the full dataset. This script will take in the initial table of UMLS concepts and produce a .tsv file with each row formatted as `CUI\\tconcept_synonym1\\tconcept_synonym2`. Once the UMLS dataset .RRF file is downloaded, the script can be run from the `examples/nlp/entity_linking` directory like so: \n```\npython data/umls_dataset_processing.py --cfg conf/umls_medical_entity_linking_config.yaml\n```",
"_____no_output_____"
],
[
"## Model Training",
"_____no_output_____"
],
[
"Second stage pretrain a BERT Base encoder on the self-alignment pretraining task (SAP) for improved entity linking.",
"_____no_output_____"
]
],
[
[
"# Download config\nwget.download(\"https://raw.githubusercontent.com/vadam5/NeMo/main/examples/nlp/entity_linking/conf/tiny_example_entity_linking_config.yaml\",\n os.path.join(\"tiny_example_entity_linking_config.yaml\"))\n\n# Load in config file\ncfg = OmegaConf.load(os.path.join(\"tiny_example_entity_linking_config.yaml\"))",
"_____no_output_____"
],
[
"# Initialize the trainer and model\ntrainer = Trainer(**cfg.trainer)\nexp_manager(trainer, cfg.get(\"exp_manager\", None))\nmodel = nemo_nlp.models.EntityLinkingModel(cfg=cfg.model, trainer=trainer)",
"_____no_output_____"
],
[
"# Train and save the model\ntrainer.fit(model)\nmodel.save_to(cfg.model.nemo_path)",
"_____no_output_____"
]
],
[
[
"You can run the script at `examples/nlp/entity_linking/self_alignment_pretraining.py` to train a model on a larger dataset. Run\n\n```\npython self_alignment_pretraining.py\n```\nfrom the `examples/nlp/entity_linking` directory.",
"_____no_output_____"
],
[
"## Model Evaluation\n\nLet's evaluate our freshly trained model and compare its performance with a BERT Base encoder that hasn't undergone self-alignment pretraining. We first need to restore our trained model and load our BERT Base Baseline model.",
"_____no_output_____"
]
],
[
[
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Restore second stage pretrained model\nsap_model_cfg = cfg\nsap_model = nemo_nlp.models.EntityLinkingModel.restore_from(sap_model_cfg.model.nemo_path).to(device)\n\n# Load original model\nbase_model_cfg = OmegaConf.load(\"tiny_example_entity_linking_config.yaml\")\n\n# Set train/val datasets to None to avoid loading datasets associated with training\nbase_model_cfg.model.train_ds = None\nbase_model_cfg.model.validation_ds = None\nbase_model_cfg.index.index_save_name = \"base_model_index\"\nbase_model = nemo_nlp.models.EntityLinkingModel(base_model_cfg.model).to(device)",
"_____no_output_____"
]
],
[
[
"We are going evaluate our model on a nearest neighbors task using top 1 and top 5 accuarcy as our metric. We will be using a tiny example test knowledge base and test queries. For this evaluation we are going to be comparing every test query with every concept vector in our test set knowledge base. We will rank each item in the knowledge base by its cosine similarity with the test query. We'll then compare the IDs of the predicted most similar test knowledge base concepts with our ground truth query IDs to calculate top 1 and top 5 accuarcy. For this metric higher is better.",
"_____no_output_____"
]
],
[
[
"# Helper function to get data embeddings\ndef get_embeddings(model, dataloader):\n embeddings, cids = [], []\n\n with torch.no_grad():\n for batch in tqdm(dataloader):\n input_ids, token_type_ids, attention_mask, batch_cids = batch\n batch_embeddings = model.forward(input_ids=input_ids.to(device), \n token_type_ids=token_type_ids.to(device), \n attention_mask=attention_mask.to(device))\n\n # Accumulate index embeddings and their corresponding IDs\n embeddings.extend(batch_embeddings.cpu().detach().numpy())\n cids.extend(batch_cids)\n \n return embeddings, cids",
"_____no_output_____"
],
[
"def evaluate(model, test_kb, test_queries, ks):\n # Initialize knowledge base and query data loaders\n test_kb_dataloader = model.setup_dataloader(test_kb, is_index_data=True)\n test_query_dataloader = model.setup_dataloader(test_queries, is_index_data=True)\n \n # Get knowledge base and query embeddings\n test_kb_embs, test_kb_cids = get_embeddings(model, test_kb_dataloader)\n test_query_embs, test_query_cids = get_embeddings(model, test_query_dataloader)\n\n # Calculate the cosine distance between each query and knowledge base concept\n score_matrix = np.matmul(np.array(test_query_embs), np.array(test_kb_embs).T)\n accs = {k : 0 for k in ks}\n \n # Compare the knowledge base IDs of the knowledge base entities with \n # the smallest cosine distance from the query \n for query_idx in tqdm(range(len(test_query_cids))):\n query_emb = test_query_embs[query_idx]\n query_cid = test_query_cids[query_idx]\n query_scores = score_matrix[query_idx]\n\n for k in ks:\n topk_idxs = np.argpartition(query_scores, -k)[-k:]\n topk_cids = [test_kb_cids[idx] for idx in topk_idxs]\n \n # If the correct query ID is amoung the top k closest kb IDs\n # the model correctly linked the entity\n match = int(query_cid in topk_cids)\n accs[k] += match\n\n for k in ks:\n accs[k] /= len(test_query_cids)\n \n return accs",
"_____no_output_____"
],
[
"test_kb = OmegaConf.create({\n \"data_file\": os.path.join(DATA_DIR, \"tiny_example_test_kb.tsv\"),\n \"max_seq_length\": 128,\n \"batch_size\": 10,\n \"shuffle\": False,\n})\n\ntest_queries = OmegaConf.create({\n \"data_file\": os.path.join(DATA_DIR, \"tiny_example_test_queries.tsv\"),\n \"max_seq_length\": 128,\n \"batch_size\": 10,\n \"shuffle\": False,\n})\n\nks = [1, 5]\n\nbase_accs = evaluate(base_model, test_kb, test_queries, ks)\nbase_accs[\"Model\"] = \"BERT Base Baseline\"\nsap_accs = evaluate(sap_model, test_kb, test_queries, ks)\nsap_accs[\"Model\"] = \"BERT + SAP\"\n\nprint(\"Top 1 and Top 5 Accuracy Comparison:\")\nresults_df = pd.DataFrame([base_accs, sap_accs], columns=[\"Model\", 1, 5])\nresults_df = results_df.style.set_properties(**{'text-align': 'left', }).set_table_styles([dict(selector='th', props=[('text-align', 'left')])])\ndisplay(results_df)",
"_____no_output_____"
]
],
[
[
"The purpose of this section was to show an example of evaluating your entity linking model. This evaluation set contains very little data, and no serious conclusions should be drawn about model performance. Top 1 accuracy should be between 0.7 and 1.0 for both models and top 5 accuracy should be between 0.9 and 1.0. When evaluating a model trained on a larger dataset, you can use a nearest neighbors index to speed up the evaluation time.",
"_____no_output_____"
],
[
"## Building an Index",
"_____no_output_____"
],
[
"To qualitatively observe the improvement we gain from the second stage pretraining, let's build two indices. One will be built with BERT base embeddings before self-alignment pretraining and one will be built with the model we just trained. Our knowledge base in this tutorial will be in the same domain and have some over lapping concepts as the training set. This data file is formatted as `ID\\tconcept`.",
"_____no_output_____"
],
[
"The `EntityLinkingDataset` class can load the data used for training the entity linking encoder as well as for building the index if the `is_index_data` flag is set to true. ",
"_____no_output_____"
]
],
[
[
"def build_index(cfg, model):\n # Setup index dataset loader\n index_dataloader = model.setup_dataloader(cfg.index.index_ds, is_index_data=True)\n \n # Get index dataset embeddings\n embeddings, _ = get_embeddings(model, index_dataloader)\n \n # Train IVFFlat index using faiss\n embeddings = np.array(embeddings)\n quantizer = faiss.IndexFlatL2(cfg.index.dims)\n index = faiss.IndexIVFFlat(quantizer, cfg.index.dims, cfg.index.nlist)\n index = faiss.index_cpu_to_all_gpus(index)\n index.train(embeddings)\n \n # Add concept embeddings to index\n for i in tqdm(range(0, embeddings.shape[0], cfg.index.index_batch_size)):\n index.add(embeddings[i:i+cfg.index.index_batch_size])\n\n # Save index\n faiss.write_index(faiss.index_gpu_to_cpu(index), cfg.index.index_save_name)",
"_____no_output_____"
],
[
"build_index(sap_model_cfg, sap_model.to(device))\nbuild_index(base_model_cfg, base_model.to(device))",
"_____no_output_____"
]
],
[
[
"## Entity Linking via Nearest Neighbor Search",
"_____no_output_____"
],
[
"Now its time to query our indices!",
"_____no_output_____"
]
],
[
[
"def query_index(cfg, model, index, queries, id2string):\n query_embs = get_query_embedding(queries, model).cpu().detach().numpy()\n \n # Use query embedding to find closet concept embedding in knowledge base\n distances, neighbors = index.search(query_embs, cfg.index.top_n)\n neighbor_concepts = [[id2string[concept_id] for concept_id in query_neighbor] \\\n for query_neighbor in neighbors]\n \n for query_idx in range(len(queries)):\n print(f\"\\nThe most similar concepts to {queries[query_idx]} are:\")\n for cid, concept, dist in zip(neighbors[query_idx], neighbor_concepts[query_idx], distances[query_idx]):\n print(cid, concept, 1 - dist)\n\n \ndef get_query_embedding(queries, model):\n model_input = model.tokenizer(queries,\n add_special_tokens = True,\n padding = True,\n truncation = True,\n max_length = 512,\n return_token_type_ids = True,\n return_attention_mask = True)\n\n query_emb = model.forward(input_ids=torch.LongTensor(model_input[\"input_ids\"]).to(device),\n token_type_ids=torch.LongTensor(model_input[\"token_type_ids\"]).to(device),\n attention_mask=torch.LongTensor(model_input[\"attention_mask\"]).to(device))\n \n return query_emb",
"_____no_output_____"
],
[
"# Load indices\nsap_index = faiss.read_index(sap_model_cfg.index.index_save_name)\nbase_index = faiss.read_index(base_model_cfg.index.index_save_name)",
"_____no_output_____"
],
[
"# Map concept IDs to one canonical string\nindex_data = open(sap_model_cfg.index.index_ds.data_file, \"r\", encoding='utf-8-sig')\nid2string = {}\n\nfor line in index_data:\n cid, concept = line.split(\"\\t\")\n id2string[int(cid) - 1] = concept.strip()",
"_____no_output_____"
],
[
"id2string",
"_____no_output_____"
],
[
"# Query both indices\nqueries = [\"high blood sugar\", \"head pain\"]\nprint(\"BERT Base output before Self Alignment Pretraining:\")\nquery_index(base_model_cfg, base_model, base_index, queries, id2string)\nprint(\"-\" * 50)\nprint(\"BERT Base output after Self Alignment Pretraining:\")\nquery_index(sap_model_cfg, sap_model, sap_index, queries, id2string)",
"BERT Base output before Self Alignment Pretraining:\n\nThe most similar concepts to high blood sugar are:\n6 diabetes 0.9095035269856453\n0 Headache 0.9046077728271484\n8 Nesina 0.8512845933437347\n\nThe most similar concepts to head pain are:\n1 Myocardial infraction 0.7848673164844513\n4 chronic kidney disease 0.766732469201088\n3 myocardial ischemia 0.761662483215332\n--------------------------------------------------\nBERT Base output after Self Alignment Pretraining:\n\nThe most similar concepts to high blood sugar are:\n7 Hyperinsulinemia 0.22790831327438354\n9 hypoglycemia 0.15696585178375244\n6 diabetes 0.06939101219177246\n\nThe most similar concepts to head pain are:\n0 Headache 0.6710055470466614\n9 hypoglycemia 0.23891878128051758\n3 myocardial ischemia 0.170110821723938\n"
]
],
[
[
"Even after only training on this tiny amount of data, the qualitative performance boost from self-alignment pretraining is visible. The baseline model links \"*high blood sugar*\" to the entity \"*6 diabetes*\" while our SAP BERT model accurately links \"*high blood sugar*\" to \"*Hyperinsulinemia*\". Similarly, \"*head pain*\" and \"*Myocardial infraction*\" are not the same concept, but \"*head pain*\" and \"*Headache*\" are.",
"_____no_output_____"
],
[
"For larger knowledge bases keeping the default embedding size might be too large and cause out of memory issues. You can apply PCA or some other dimensionality reduction method to your data to reduce its memory footprint. Code for creating a text file of all the UMLS entities in the correct format needed to build an index and creating a dictionary mapping concept ids to canonical concept strings can be found here `examples/nlp/entity_linking/data/umls_dataset_processing.py`. \n\nThe code for extracting knowledge base concept embeddings, training and applying a pca transformation to the embeddings, builing a faiss index and querying the index from the command line is located at `examples/nlp/entity_linking/build_and_query_index.py`. \n\nIf you've cloned the NeMo repo, both of these steps can be run as follows on the commandline from the `examples/nlp/entity_linking/` directory.\n\n```\npython data/umls_dataset_processing.py --index --cfg /conf/medical_entity_linking_config.yaml\npython build_and_query_index.py --restore --cfg conf/medical_entity_linking_config.yaml --top_n 5 \n```\nIntermidate steps of the index building process are saved. In the occurance of an error, previously completed steps do not need to be rerun. ",
"_____no_output_____"
],
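[
"# Minimal sketch (ours, not from the NeMo repo): shrinking embeddings with a faiss\n# PCA transform before building the index. Assumes `embeddings` is an (n, 768)\n# float32 numpy array of concept embeddings.\nimport faiss\npca = faiss.PCAMatrix(768, 256)      # project 768-d BERT embeddings down to 256-d\npca.train(embeddings)\nreduced = pca.apply_py(embeddings)   # build and train the index on `reduced` instead",
"_____no_output_____"
],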
[
"## Command Recap",
"_____no_output_____"
],
[
"Here is a recap of the commands and steps to repeat this process on the full UMLS dataset. \n\n1) Download the UMLS datset file `MRCONSO.RRF` from the NIH website and place it in the `examples/nlp/entity_linking/data` directory.\n\n2) Run the following commands from the `examples/nlp/entity_linking` directory\n```\npython data/umls_dataset_processing.py --cfg conf/umls_medical_entity_linking_config.yaml\npython self_alignment_pretraining.py\npython data/umls_dataset_processing.py --index --cfg conf/umls_medical_entity_linking_config.yaml\npython build_and_query_index.py --restore --cfg conf/umls_medical_entity_linking_config.yaml --top_n 5\n```\nThe model will take ~24hrs to train on two GPUs and ~48hrs to train on one GPU.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0d3114f53014be5cb890cbefa73e4f85baf6a1e | 3,620 | ipynb | Jupyter Notebook | 06/06.ipynb | bruninmi/AoC2021 | d412d96ac8a165b4d8f4b8a46b117c3148fdf67e | [
"MIT"
] | 2 | 2021-12-08T11:08:21.000Z | 2021-12-13T12:38:51.000Z | 06/06.ipynb | bruninmi/AoC2021 | d412d96ac8a165b4d8f4b8a46b117c3148fdf67e | [
"MIT"
] | null | null | null | 06/06.ipynb | bruninmi/AoC2021 | d412d96ac8a165b4d8f4b8a46b117c3148fdf67e | [
"MIT"
] | null | null | null | 18.95288 | 78 | 0.449448 | [
[
[
"# Imports & read file\nimport time\n\ndef read_file(filename):\n with open(filename) as infile:\n return [int(i) for i in infile.readline().strip().split(',')]\n return None",
"_____no_output_____"
],
[
"# Part One\ndef simulate_days(ages, days):\n fish = [0] * 7\n for age in ages:\n fish[age] += 1\n day = 0\n babies = [0] * 7\n for _ in range(days):\n babies[(day + 2) % 7] = fish[day]\n fish[day] += babies[day]\n babies[day] = 0\n day = (day + 1) % 7\n return sum(fish) + sum(babies)",
"_____no_output_____"
],
[
"# Test Part One\nstart = time.time()\nprint(simulate_days(read_file(\"test.txt\"), 80) == 5934)\ntime.time() - start",
"True\n"
],
[
"# Solve Part One\nstart = time.time()\nprint(simulate_days(read_file(\"input.txt\"), 80))\ntime.time() - start",
"374927\n"
],
[
"# Part Two",
"_____no_output_____"
],
[
"# Test Part Two\nstart = time.time()\nprint(simulate_days(read_file(\"test.txt\"), 256) == 26984457539)\ntime.time() - start",
"True\n"
],
[
"# Solve Part Two\nstart = time.time()\nprint(simulate_days(read_file(\"input.txt\"), 256))\ntime.time() - start",
"1687617803407\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d32523d6f668dd0b323f555c46a67f497fbf8e | 7,905 | ipynb | Jupyter Notebook | docs/tutorials/classification/knn.ipynb | pmla/scikit-network | 6e74a2338f53f1e54a25e7e19ab9fcf56371c275 | [
"BSD-3-Clause"
] | 457 | 2018-07-24T12:42:14.000Z | 2022-03-31T08:30:39.000Z | docs/tutorials/classification/knn.ipynb | pmla/scikit-network | 6e74a2338f53f1e54a25e7e19ab9fcf56371c275 | [
"BSD-3-Clause"
] | 281 | 2018-07-13T05:01:19.000Z | 2022-03-31T14:13:43.000Z | docs/tutorials/classification/knn.ipynb | pmla/scikit-network | 6e74a2338f53f1e54a25e7e19ab9fcf56371c275 | [
"BSD-3-Clause"
] | 58 | 2019-04-22T09:04:32.000Z | 2022-03-30T12:43:08.000Z | 21.023936 | 211 | 0.542315 | [
[
[
"# Nearest neighbors",
"_____no_output_____"
],
[
"This notebook illustrates the classification of the nodes of a graph by the [k-nearest neighbors algorithm](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm), based on the labels of a few nodes.",
"_____no_output_____"
]
],
[
[
"from IPython.display import SVG",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"from sknetwork.data import karate_club, painters, movie_actor\nfrom sknetwork.classification import KNN\nfrom sknetwork.embedding import GSVD\nfrom sknetwork.visualization import svg_graph, svg_digraph, svg_bigraph",
"_____no_output_____"
]
],
[
[
"## Graphs",
"_____no_output_____"
]
],
[
[
"graph = karate_club(metadata=True)\nadjacency = graph.adjacency\nposition = graph.position\nlabels_true = graph.labels",
"_____no_output_____"
],
[
"seeds = {i: labels_true[i] for i in [0, 33]}",
"_____no_output_____"
],
[
"knn = KNN(GSVD(3), n_neighbors=1)\nlabels_pred = knn.fit_transform(adjacency, seeds)",
"_____no_output_____"
],
[
"precision = np.round(np.mean(labels_pred == labels_true), 2)\nprecision",
"_____no_output_____"
],
[
"image = svg_graph(adjacency, position, labels=labels_pred, seeds=seeds)\nSVG(image)",
"_____no_output_____"
],
[
"# soft classification (here probability of label 1)\nknn = KNN(GSVD(3), n_neighbors=2)\nknn.fit(adjacency, seeds)\nmembership = knn.membership_",
"_____no_output_____"
],
[
"scores = membership[:,1].toarray().ravel()",
"_____no_output_____"
],
[
"image = svg_graph(adjacency, position, scores=scores, seeds=seeds)\nSVG(image)",
"_____no_output_____"
]
],
[
[
"## Directed graphs",
"_____no_output_____"
]
],
[
[
"graph = painters(metadata=True)\nadjacency = graph.adjacency\nposition = graph.position\nnames = graph.names",
"_____no_output_____"
],
[
"rembrandt = 5\nklimt = 6\ncezanne = 11\nseeds = {cezanne: 0, rembrandt: 1, klimt: 2}",
"_____no_output_____"
],
[
"knn = KNN(GSVD(3), n_neighbors=2)\nlabels = knn.fit_transform(adjacency, seeds)",
"_____no_output_____"
],
[
"image = svg_digraph(adjacency, position, names, labels=labels, seeds=seeds)\nSVG(image)",
"_____no_output_____"
],
[
"# soft classification\nmembership = knn.membership_\nscores = membership[:,0].toarray().ravel()",
"_____no_output_____"
],
[
"image = svg_digraph(adjacency, position, names, scores=scores, seeds=[cezanne])\nSVG(image)",
"_____no_output_____"
]
],
[
[
"## Bipartite graphs",
"_____no_output_____"
]
],
[
[
"graph = movie_actor(metadata=True)\nbiadjacency = graph.biadjacency\nnames_row = graph.names_row\nnames_col = graph.names_col",
"_____no_output_____"
],
[
"inception = 0\ndrive = 3\nbudapest = 8",
"_____no_output_____"
],
[
"seeds_row = {inception: 0, drive: 1, budapest: 2}",
"_____no_output_____"
],
[
"knn = KNN(GSVD(3), n_neighbors=2)\nlabels_row = knn.fit_transform(biadjacency, seeds_row)\nlabels_col = knn.labels_col_",
"_____no_output_____"
],
[
"image = svg_bigraph(biadjacency, names_row, names_col, labels_row, labels_col, seeds_row=seeds_row)\nSVG(image)",
"_____no_output_____"
],
[
"# soft classification\nmembership_row = knn.membership_row_\nmembership_col = knn.membership_col_",
"_____no_output_____"
],
[
"scores_row = membership_row[:,1].toarray().ravel()\nscores_col = membership_col[:,1].toarray().ravel()",
"_____no_output_____"
],
[
"image = svg_bigraph(biadjacency, names_row, names_col, scores_row=scores_row, scores_col=scores_col, \n seeds_row=seeds_row)\nSVG(image)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d32cb2e1876b327abb7dabd0f23b5f3ead25a2 | 192,461 | ipynb | Jupyter Notebook | Cohort.ipynb | admatuszak/Cohort-Table | 4e63ac6c9bfc7aa4979dba5a65bbf095b11e3671 | [
"MIT"
] | 1 | 2021-02-05T00:16:16.000Z | 2021-02-05T00:16:16.000Z | .ipynb_checkpoints/Cohort-checkpoint.ipynb | admatuszak/Cohort-Table | 4e63ac6c9bfc7aa4979dba5a65bbf095b11e3671 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Cohort-checkpoint.ipynb | admatuszak/Cohort-Table | 4e63ac6c9bfc7aa4979dba5a65bbf095b11e3671 | [
"MIT"
] | null | null | null | 96.762695 | 11,136 | 0.623191 | [
[
[
"import pandas as pd\nfrom matplotlib.ticker import FuncFormatter\nfrom Cohort import CohortTable\nimport numpy as np\nimport altair as alt\nimport math\nfrom IPython.display import display, Markdown",
"_____no_output_____"
],
[
"# Pulled from class module; need to remove self references\ndef print_all_tables(self):\n display(Markdown('## Productivity Table'))\n display(Markdown('The following table contains the percentage of productivity for each cohort by year.'))\n display(Markdown('The maximum percentage for each cell is 100% or 1. Any value less than 1 is used to discount the \\\n productivity of that cohort class for that particular year.\\n'))\n self.print_table(self.productivity_df, 'Productivity Table')\n \n display(Markdown('## Employee Count before Attrition'))\n display(Markdown('This table for each year, by each cohort, if no attrition were to occur.\\n'))\n self.print_table(self.employee_count_df, 'Employee Count (Before Attrition) by Year', precision=0, create_sum=True, sum_title='Employees')\n \n display(Markdown('## Attrition Mask Table'))\n display(Markdown('This table represents the *percentage* of the cohort **population** that has left. The number for each cohort starts\\\n at 1 (or 100%) and decreases over time. If the argument *attrition_y0* is **TRUE**, the first year of the cohort\\\n is reduced by the annual attrition rate. Otherwise, attrition starts in the second year of each cohort.\\n'))\n self.print_table(pd.DataFrame(self.attrition_mask), 'Attrition Mask - 0% to 100% of Employee Count')\n \n display(Markdown('## Retained Employees after Attrition'))\n display(Markdown('This table contains the number of employees that remain with the company after accounting for attrition. This \\\n table contains only whole employees, not fractions, to illustrate when each person is expected to leave as opposed \\\n to the Full Time Equivalent (FTE) table below.\\n'))\n self.print_table(self.retained_employee_count_df, 'Employees, After Attrition, by Year', precision=0, create_sum=True, sum_title='Employees')\n \n display(Markdown('## Full Time Equivalent Table'))\n display(Markdown('This table takes the retained employees after attrition from the table above and calculates the \\\n number of FTE after applying mid-year hiring. We assume that hiring takes place throughout the year rather than have \\\n all employees hired on the first of the year. This results in a lower FTE figure for the first year of the cohort.\\n'))\n self.print_table(self.retained_fte_df, 'FTE Table', create_sum=True, sum_title='FTE')\n \n display(Markdown('## Full Time Equivalent after Factoring Productivity Ramp Up'))\n display(Markdown('This table takes the FTE figures from the table above and applies the ramp up in productivity.\\n'))\n self.print_table(self.retained_fte_factored_df, 'FTE After Applying Productivity Ramp', create_sum=True, sum_title='FTE')\n \n display(Markdown('## Revenue Table'))\n display(Markdown('This table takes the final FTE figures, after factoring for productivity ramp up periods, and calculates \\\n the total revenue per year and per cohort.\\n'))\n self.print_table(self.revenue_df, 'Total Revenue by Year', precision=0, create_sum=True, sum_title='Revenue')\n \n def print_table(self, df, table_title, precision=2, create_sum=False, sum_title='Sum'):\n df.index.name='Cohort'\n if create_sum:\n sum_title = 'Sum of '+sum_title\n df.loc[sum_title] = df.sum()\n format_string = '{:,.' + str(precision) + 'f}'\n df_styled = df.style.format(format_string).set_caption(table_title)\n display(df_styled)",
"_____no_output_____"
],
[
"myTable = CohortTable(forecast_period=10, n_years=3, hires_per_year=[1,2,2,3,4,6], \\\n revenue_goal=1000000, annual_attrition=.16, first_year_full_hire=True, attrition_y0=False)\n\nmyTable.print_all_tables()",
"_____no_output_____"
],
[
"ax = myTable.retained_fte_factored_df.loc['Sum of FTE'].plot(kind='bar', title='Revenue by Year')\nax.set_xlabel('Year')\nax.set_ylabel('Revenue')\nax.yaxis.set_major_formatter(FuncFormatter('{0:,.0f}'.format))",
"_____no_output_____"
],
[
"myTable.revenue_df.loc['Sum of Revenue'] = myTable.revenue_df.sum()",
"_____no_output_____"
],
[
"revenue_melt = myTable.revenue_df.loc[['Sum of Revenue']].melt(var_name='Year', value_name='Revenue')\nchart = alt.Chart(revenue_melt).mark_area().encode(\n x = alt.X('Year', sort=list(revenue_melt.index)),\n y = alt.Y('Revenue'),\n tooltip = ['Year', alt.Tooltip('Revenue', format=',.0f')]\n).properties(title='Total Revenue by Year', width=600, height=400).interactive()\n\ndisplay(revenue_melt)\ndisplay(chart)",
"_____no_output_____"
],
[
"def size_list(l, length, pad=0):\n if len(l) >= length:\n del l[length:]\n else:\n l.extend([pad] * (length - len(l)))\n\n return l",
"_____no_output_____"
],
[
"n_years = 5\nforecast_period = 10\nramp_log = [math.log2(n) for n in np.delete(np.linspace(1,2,n_years+1),0)]\nramp_log_full = size_list(ramp_log, forecast_period, pad=1)\n\nproductivity_list = [np.roll(ramp_log_full, i) for i in range(forecast_period)]\nproductivity_list = np.triu(productivity_list)",
"_____no_output_____"
],
[
"pd.DataFrame(productivity_list)",
"_____no_output_____"
],
[
"ramp_exp = [math.exp(1-(1/n**2)) for n in np.delete(np.linspace(0,1,n_years+1),0)]",
"_____no_output_____"
],
[
"sns.lineplot(data=productivity_list[0])",
"_____no_output_____"
],
[
"def sigmoid(x, width, center):\n return 1 / (1 + np.exp(width*(-x - center)))",
"_____no_output_____"
],
[
"sigmoid(-10, 0,0)",
"_____no_output_____"
],
[
"s_curve = [sigmoid(n, .1, 0) for n in np.linspace(-10,10,50)]\nsns.lineplot(data=s_curve)",
"_____no_output_____"
],
[
"s_curve = [sigmoid(n, .3, -10) for n in np.linspace(-10,10,50)]\nsns.lineplot(data=s_curve)",
"_____no_output_____"
],
[
"s_curve",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d3345174548717110a0e0a83bef885e64af2fb | 101,351 | ipynb | Jupyter Notebook | wandb/run-20210518_105803-3lpt85em/tmp/code/main.ipynb | Programmer-RD-AI/Sign-Language-Recognition | ec86fcfd3795921572217b6e721bb6a74b2abcca | [
"Apache-2.0"
] | 1 | 2021-05-18T11:21:03.000Z | 2021-05-18T11:21:03.000Z | wandb/run-20210518_105803-3lpt85em/tmp/code/main.ipynb | Programmer-RD-AI/Sign-Language-Recognition | ec86fcfd3795921572217b6e721bb6a74b2abcca | [
"Apache-2.0"
] | null | null | null | wandb/run-20210518_105803-3lpt85em/tmp/code/main.ipynb | Programmer-RD-AI/Sign-Language-Recognition | ec86fcfd3795921572217b6e721bb6a74b2abcca | [
"Apache-2.0"
] | null | null | null | 32.370169 | 6,656 | 0.505787 | [
[
[
"# WorkFlow",
"_____no_output_____"
],
[
"## Classes",
"_____no_output_____"
],
[
"## Load the data",
"_____no_output_____"
],
[
"## Test Modelling",
"_____no_output_____"
],
[
"## Modelling",
"_____no_output_____"
],
[
"**<hr>**",
"_____no_output_____"
],
[
"## Classes",
"_____no_output_____"
]
],
[
[
"NAME = \"change the conv2d\"",
"_____no_output_____"
],
[
"BATCH_SIZE = 32",
"_____no_output_____"
],
[
"import os\nimport cv2\nimport torch\nimport numpy as np",
"_____no_output_____"
],
[
"def load_data(img_size=112):\n data = []\n index = -1\n labels = {}\n for directory in os.listdir('./data/'):\n index += 1\n labels[f'./data/{directory}/'] = [index,-1]\n print(len(labels))\n for label in labels:\n for file in os.listdir(label):\n filepath = label + file\n img = cv2.imread(filepath,cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img,(img_size,img_size))\n img = img / 255.0\n data.append([\n np.array(img),\n labels[label][0]\n ])\n labels[label][1] += 1\n for _ in range(12):\n np.random.shuffle(data)\n print(len(data))\n np.save('./data.npy',data)\n return data",
"_____no_output_____"
],
[
"import torch",
"_____no_output_____"
],
[
"def other_loading_data_proccess(data):\n X = []\n y = []\n print('going through the data..')\n for d in data:\n X.append(d[0])\n y.append(d[1])\n print('splitting the data')\n VAL_SPLIT = 0.25\n VAL_SPLIT = len(X)*VAL_SPLIT\n VAL_SPLIT = int(VAL_SPLIT)\n X_train = X[:-VAL_SPLIT]\n y_train = y[:-VAL_SPLIT]\n X_test = X[-VAL_SPLIT:]\n y_test = y[-VAL_SPLIT:]\n print('turning data to tensors')\n X_train = torch.from_numpy(np.array(X_train))\n y_train = torch.from_numpy(np.array(y_train))\n X_test = torch.from_numpy(np.array(X_test))\n y_test = torch.from_numpy(np.array(y_test))\n return [X_train,X_test,y_train,y_test]",
"_____no_output_____"
]
],
[
[
"**<hr>**",
"_____no_output_____"
],
[
"## Load the data",
"_____no_output_____"
]
],
[
[
"REBUILD_DATA = True\nif REBUILD_DATA:\n data = load_data()\n np.random.shuffle(data)\n X_train,X_test,y_train,y_test = other_loading_data_proccess(data)",
"36\n2515\n"
]
],
[
[
"## Test Modelling",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn",
"_____no_output_____"
],
[
"import torch.nn.functional as F",
"_____no_output_____"
],
[
"# class Test_Model(nn.Module):\n# def __init__(self):\n# super().__init__()\n# self.conv1 = nn.Conv2d(1, 6, 5)\n# self.pool = nn.MaxPool2d(2, 2)\n# self.conv2 = nn.Conv2d(6, 16, 5)\n# self.fc1 = nn.Linear(16 * 25 * 25, 120)\n# self.fc2 = nn.Linear(120, 84)\n# self.fc3 = nn.Linear(84, 36)\n\n# def forward(self, x):\n# x = self.pool(F.relu(self.conv1(x)))\n# x = self.pool(F.relu(self.conv2(x)))\n# x = x.view(-1, 16 * 25 * 25)\n# x = F.relu(self.fc1(x))\n# x = F.relu(self.fc2(x))\n# x = self.fc3(x)\n# return x",
"_____no_output_____"
],
[
"class Test_Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.pool = nn.MaxPool2d(2, 2)\n self.conv1 = nn.Conv2d(1, 32, 5)\n self.conv3 = nn.Conv2d(32,64,5)\n self.conv2 = nn.Conv2d(64, 128, 5)\n self.fc1 = nn.Linear(128 * 10 * 10, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc4 = nn.Linear(256,128)\n self.fc3 = nn.Linear(128, 36)\n\n def forward(self, x,shape=False):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv3(x)))\n x = self.pool(F.relu(self.conv2(x)))\n if shape:\n print(x.shape)\n x = x.view(-1, 128 * 10 * 10)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc4(x))\n x = self.fc3(x)\n return x",
"_____no_output_____"
],
[
"device = torch.device('cuda')",
"_____no_output_____"
],
[
"model = Test_Model().to(device)",
"_____no_output_____"
],
[
"preds = model(X_test.reshape(-1,1,112,112).float().to(device),True)",
"torch.Size([628, 128, 10, 10])\n"
],
[
"preds[0]",
"_____no_output_____"
],
[
"optimizer = torch.optim.SGD(model.parameters(),lr=0.1)\ncriterion = nn.CrossEntropyLoss()",
"_____no_output_____"
],
[
"EPOCHS = 5",
"_____no_output_____"
],
[
"loss_logs = []",
"_____no_output_____"
],
[
"from tqdm import tqdm",
"_____no_output_____"
],
[
"PROJECT_NAME = \"Sign-Language-Recognition\"",
"_____no_output_____"
],
[
"def test(net,X,y):\n correct = 0\n total = 0\n net.eval()\n with torch.no_grad():\n for i in range(len(X)):\n real_class = torch.argmax(y[i]).to(device)\n net_out = net(X[i].view(-1,1,112,112).to(device).float())\n net_out = net_out[0]\n predictied_class = torch.argmax(net_out)\n if predictied_class == real_class:\n correct += 1\n total += 1\n return round(correct/total,3)",
"_____no_output_____"
],
[
"import wandb",
"_____no_output_____"
],
[
"len(os.listdir('./data/'))",
"_____no_output_____"
],
[
"import random",
"_____no_output_____"
],
[
"# index = random.randint(0,29)\n# print(index)\n# wandb.init(project=PROJECT_NAME,name=NAME)\n# for _ in tqdm(range(EPOCHS)):\n# for i in range(0,len(X_train),BATCH_SIZE):\n# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)\n# y_batch = y_train[i:i+BATCH_SIZE].to(device)\n# model.to(device)\n# preds = model(X_batch.float())\n# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index])})\n# wandb.finish()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.Series(loss_logs)",
"/home/indika/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"df.plot.line(figsize=(12,6))",
"_____no_output_____"
],
[
"test(model,X_test,y_test)",
"_____no_output_____"
],
[
"test(model,X_train,y_train)",
"_____no_output_____"
],
[
"preds",
"_____no_output_____"
],
[
"X_testing = X_train\ny_testing = y_train\ncorrect = 0\ntotal = 0\nmodel.eval()\nwith torch.no_grad():\n for i in range(len(X_testing)):\n real_class = torch.argmax(y_testing[i]).to(device)\n net_out = model(X_testing[i].view(-1,1,112,112).to(device).float())\n net_out = net_out[0]\n predictied_class = torch.argmax(net_out)\n# print(predictied_class)\n if str(predictied_class) == str(real_class):\n correct += 1\n total += 1\nprint(round(correct/total,3))",
"0.0\n"
],
[
"# for real,pred in zip(y_batch,preds):\n# print(real)\n# print(torch.argmax(pred))\n# print('\\n')",
"_____no_output_____"
]
],
[
[
"## Modelling",
"_____no_output_____"
]
],
[
[
"# conv2d_output\n# conv2d_1_ouput\n# conv2d_2_ouput\n# output_fc1\n# output_fc2\n# output_fc4\n# max_pool2d_keranl\n# max_pool2d\n# num_of_linear\n\n# activation\n# best num of epochs\n# best optimizer\n# best loss\n## best lr",
"_____no_output_____"
],
[
"class Test_Model(nn.Module):\n def __init__(self,conv2d_output=128,conv2d_1_ouput=32,conv2d_2_ouput=64,output_fc1=512,output_fc2=256,output_fc4=128,output=36,activation=F.relu,max_pool2d_keranl=2):\n super().__init__()\n print(conv2d_output)\n print(conv2d_1_ouput)\n print(conv2d_2_ouput)\n print(output_fc1)\n print(output_fc2)\n print(output_fc4)\n print(activation)\n self.conv2d_output = conv2d_output\n self.pool = nn.MaxPool2d(max_pool2d_keranl)\n self.conv1 = nn.Conv2d(1, conv2d_1_ouput, 5)\n self.conv3 = nn.Conv2d(conv2d_1_ouput,conv2d_2_ouput,5)\n self.conv2 = nn.Conv2d(conv2d_2_ouput, conv2d_output, 5)\n self.fc1 = nn.Linear(conv2d_output * 10 * 10, output_fc1)\n self.fc2 = nn.Linear(output_fc1, output_fc2)\n self.fc4 = nn.Linear(output_fc2,output_fc4)\n self.fc3 = nn.Linear(output_fc4, output)\n self.activation = activation\n\n def forward(self, x,shape=False):\n x = self.pool(self.activation(self.conv1(x)))\n x = self.pool(self.activation(self.conv3(x)))\n x = self.pool(self.activation(self.conv2(x)))\n if shape:\n print(x.shape)\n x = x.view(-1, self.conv2d_output * 10 * 10)\n x = self.activation(self.fc1(x))\n x = self.activation(self.fc2(x))\n x = self.activation(self.fc4(x))\n x = self.fc3(x)\n return x",
"_____no_output_____"
],
[
"# conv2d_output\n# conv2d_1_ouput\n# conv2d_2_ouput\n# output_fc1\n# output_fc2\n# output_fc4\n# max_pool2d_keranl\n# max_pool2d\n# num_of_linear\n# best num of epochs\n# best loss\n## best lr\n# batch size",
"_____no_output_____"
],
[
"EPOCHS = 3\nBATCH_SIZE = 32",
"_____no_output_____"
],
[
"# conv2d_output\n# conv2d_1_ouput\n# conv2d_2_ouput\n# output_fc1\n# output_fc2\n# output_fc4\n# max_pool2d_keranl\n# max_pool2d\n# num_of_linear\n\n# activation = \n# best num of epochs\n# best optimizer = \n# best loss\n## best lr",
"_____no_output_____"
],
[
"def get_loss(criterion,y,model,X):\n preds = model(X.view(-1,1,112,112).to(device).float())\n preds.to(device)\n loss = criterion(preds,torch.tensor(y,dtype=torch.long).to(device))\n loss.backward()\n return loss.item()",
"_____no_output_____"
],
[
"optimizers = [torch.optim.SGD,torch.optim.Adadelta,torch.optim.Adagrad,torch.optim.Adam,torch.optim.AdamW,torch.optim.SparseAdam,torch.optim.Adamax]\nfor optimizer in optimizers:\n model = Test_Model(activation=nn.ReLU())\n criterion = optimizer(model.parameters(),lr=0.1)\n wandb.init(project=PROJECT_NAME,name=f'optimizer-{optimizer}')\n for _ in tqdm(range(EPOCHS)):\n for i in range(0,len(X_train),BATCH_SIZE):\n X_batch = X_train[i:i+BATCH_SIZE]\n y_batch = y_train[i:i+BATCH_SIZE]\n model.to(device)\n preds = model(X_batch.float())\n loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index]),'val_loss':get_loss(criterion,y_test,model,X_test)})\n print(f'{torch.argmax(preds[index])} \\n {y_batch[index]}')\n print(f'{torch.argmax(preds[1])} \\n {y_batch[1]}')\n print(f'{torch.argmax(preds[2])} \\n {y_batch[2]}')\n print(f'{torch.argmax(preds[3])} \\n {y_batch[3]}')\n print(f'{torch.argmax(preds[4])} \\n {y_batch[4]}')\n wandb.finish()",
"_____no_output_____"
],
[
"# activations = [nn.ELU(),nn.LeakyReLU(),nn.PReLU(),nn.ReLU(),nn.ReLU6(),nn.RReLU(),nn.SELU(),nn.CELU(),nn.GELU(),nn.SiLU(),nn.Tanh()]\n# for activation in activations:\n# model = Test_Model(activation=activation)\n# optimizer = torch.optim.SGD(model.parameters(),lr=0.1)\n# criterion = nn.CrossEntropyLoss()\n# index = random.randint(0,29)\n# print(index)\n# wandb.init(project=PROJECT_NAME,name=f'activation-{activation}')\n# for _ in tqdm(range(EPOCHS)):\n# for i in range(0,len(X_train),BATCH_SIZE):\n# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)\n# y_batch = y_train[i:i+BATCH_SIZE].to(device)\n# model.to(device)\n# preds = model(X_batch.float())\n# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index]),'val_loss':get_loss(criterion,y_test,model,X_test)})\n# print(f'{torch.argmax(preds[index])} \\n {y_batch[index]}')\n# print(f'{torch.argmax(preds[1])} \\n {y_batch[1]}')\n# print(f'{torch.argmax(preds[2])} \\n {y_batch[2]}')\n# print(f'{torch.argmax(preds[3])} \\n {y_batch[3]}')\n# print(f'{torch.argmax(preds[4])} \\n {y_batch[4]}')\n# wandb.finish()",
"128\n32\n64\n512\n256\n128\nELU(alpha=1.0)\n8\n"
],
[
"for real,pred in zip(y_batch,preds):\n print(real)\n print(torch.argmax(pred))\n print('\\n')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d339739795500ea329fe5c7000509cb7bb445b | 67,074 | ipynb | Jupyter Notebook | intro-glove/LFS_ContinousSpace_SGD_NN-bk2.ipynb | vinaytejakoona/MTP | 424adf334a45dc3e67454db205ffeb1107f9f504 | [
"MIT"
] | null | null | null | intro-glove/LFS_ContinousSpace_SGD_NN-bk2.ipynb | vinaytejakoona/MTP | 424adf334a45dc3e67454db205ffeb1107f9f504 | [
"MIT"
] | null | null | null | intro-glove/LFS_ContinousSpace_SGD_NN-bk2.ipynb | vinaytejakoona/MTP | 424adf334a45dc3e67454db205ffeb1107f9f504 | [
"MIT"
] | null | null | null | 69.434783 | 6,382 | 0.749247 | [
[
[
"%load_ext autoreload\n%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nimport os\n\n# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE\n# Note that this is necessary for parallel execution amongst other things...\n# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'\n\nfrom snorkel import SnorkelSession\nsession = SnorkelSession()\n\n# Here, we just set how many documents we'll process for automatic testing- you can safely ignore this!\nn_docs = 500 if 'CI' in os.environ else 2591\n\nfrom snorkel.models import candidate_subclass\n\nSpouse = candidate_subclass('Spouse', ['person1', 'person2'])\n\ntrain_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all()\ndev_cands = session.query(Spouse).filter(Spouse.split == 1).order_by(Spouse.id).all()\ntest_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all()\n",
"_____no_output_____"
],
[
"from util import load_external_labels\n\n#%time load_external_labels(session, Spouse, annotator_name='gold')\n\nfrom snorkel.annotations import load_gold_labels\n\n#L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, zero_one=True)\n#L_gold_test = load_gold_labels(session, annotator_name='gold', split=2, zero_one=True)\n\nL_gold_dev = load_gold_labels(session, annotator_name='gold', split=1)\nL_gold_test = load_gold_labels(session, annotator_name='gold', split=2)",
"_____no_output_____"
],
[
"#gold_labels_dev = [x[0,0] for x in L_gold_dev.todense()]\n#for i,L in enumerate(gold_labels_dev):\n# print(i,gold_labels_dev[i])\n\ngold_labels_dev = []\nfor i,L in enumerate(L_gold_dev):\n gold_labels_dev.append(L[0,0])\n \ngold_labels_test = []\nfor i,L in enumerate(L_gold_test):\n gold_labels_test.append(L[0,0])\n \nprint(len(gold_labels_dev),len(gold_labels_test))",
"(2796, 2697)\n"
],
[
"from gensim.parsing.preprocessing import STOPWORDS\nimport gensim.matutils as gm\n\nfrom gensim.models.keyedvectors import KeyedVectors\n\n# Load pretrained model (since intermediate data is not included, the model cannot be refined with additional data)\nmodel = KeyedVectors.load_word2vec_format('../glove_w2v.txt', binary=False) # C binary format\n\n\nwordvec_unavailable= set()\ndef write_to_file(wordvec_unavailable):\n with open(\"wordvec_unavailable.txt\",\"w\") as f:\n for word in wordvec_unavailable:\n f.write(word+\"\\n\")\n\ndef preprocess(tokens):\n btw_words = [word for word in tokens if word not in STOPWORDS]\n btw_words = [word for word in btw_words if word.isalpha()]\n return btw_words\n\ndef get_word_vectors(btw_words): # returns vector of embeddings of words\n word_vectors= []\n for word in btw_words:\n try:\n word_v = np.array(model[word])\n word_v = word_v.reshape(len(word_v),1)\n #print(word_v.shape)\n word_vectors.append(model[word])\n except:\n wordvec_unavailable.add(word)\n return word_vectors\n\ndef get_similarity(word_vectors,target_word): # sent(list of word vecs) to word similarity\n similarity = 0\n target_word_vector = 0\n try:\n target_word_vector = model[target_word]\n except:\n wordvec_unavailable.add(target_word+\" t\")\n return similarity\n target_word_sparse = gm.any2sparse(target_word_vector,eps=1e-09)\n for wv in word_vectors:\n wv_sparse = gm.any2sparse(wv, eps=1e-09)\n similarity = max(similarity,gm.cossim(wv_sparse,target_word_sparse))\n return similarity\n",
"_____no_output_____"
],
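[
"# Quick illustrative check (added): similarity of a small bag of words to a target term,\n# using the GloVe vectors loaded above (the exact value depends on that vector file).\nprint(get_similarity(get_word_vectors(['husband', 'wife']), 'spouse'))",
"_____no_output_____"
],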
[
"##### Continuous ################\n\nsoftmax_Threshold = 0.3\nLF_Threshold = 0.3\n\nimport re\nfrom snorkel.lf_helpers import (\n get_left_tokens, get_right_tokens, get_between_tokens,\n get_text_between, get_tagged_text,\n)\n\n\nspouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'}\nfamily = {'father', 'mother', 'sister', 'brother', 'son', 'daughter',\n 'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'}\nfamily = family | {f + '-in-law' for f in family}\nother = {'boyfriend', 'girlfriend' 'boss', 'employee', 'secretary', 'co-worker'}\n\n# Helper function to get last name\ndef last_name(s):\n name_parts = s.split(' ')\n return name_parts[-1] if len(name_parts) > 1 else None \n\ndef LF_husband_wife(c):\n global LF_Threshold\n sc = 0\n word_vectors = get_word_vectors(preprocess(get_between_tokens(c)))\n for sw in spouses:\n sc=max(sc,get_similarity(word_vectors,sw))\n return (1,sc)\n\ndef LF_husband_wife_left_window(c):\n global LF_Threshold\n sc_1 = 0\n word_vectors = get_word_vectors(preprocess(get_left_tokens(c[0])))\n for sw in spouses:\n sc_1=max(sc_1,get_similarity(word_vectors,sw))\n \n sc_2 = 0\n word_vectors = get_word_vectors(preprocess(get_left_tokens(c[1])))\n for sw in spouses:\n sc_2=max(sc_2,get_similarity(word_vectors,sw))\n return(1,max(sc_1,sc_2))\n \ndef LF_same_last_name(c):\n p1_last_name = last_name(c.person1.get_span())\n p2_last_name = last_name(c.person2.get_span())\n if p1_last_name and p2_last_name and p1_last_name == p2_last_name:\n if c.person1.get_span() != c.person2.get_span():\n return (1,1)\n return (0,0)\n\ndef LF_no_spouse_in_sentence(c):\n return (-1,0.75) if np.random.rand() < 0.75 and len(spouses.intersection(c.get_parent().words)) == 0 else (0,0)\n\ndef LF_and_married(c):\n global LF_Threshold\n word_vectors = get_word_vectors(preprocess(get_right_tokens(c)))\n sc = get_similarity(word_vectors,'married')\n \n if 'and' in get_between_tokens(c):\n return (1,sc)\n else:\n return (0,0)\n\ndef LF_familial_relationship(c):\n global LF_Threshold\n sc = 0\n word_vectors = get_word_vectors(preprocess(get_between_tokens(c)))\n for fw in family:\n sc=max(sc,get_similarity(word_vectors,fw))\n \n return (-1,sc) \n\ndef LF_family_left_window(c):\n global LF_Threshold\n sc_1 = 0\n word_vectors = get_word_vectors(preprocess(get_left_tokens(c[0])))\n for fw in family:\n sc_1=max(sc_1,get_similarity(word_vectors,fw))\n \n sc_2 = 0\n word_vectors = get_word_vectors(preprocess(get_left_tokens(c[1])))\n for fw in family:\n sc_2=max(sc_2,get_similarity(word_vectors,fw))\n \n return (-1,max(sc_1,sc_2))\n\ndef LF_other_relationship(c):\n global LF_Threshold\n sc = 0\n word_vectors = get_word_vectors(preprocess(get_between_tokens(c)))\n for ow in other:\n sc=max(sc,get_similarity(word_vectors,ow))\n \n return (-1,sc) \n\ndef LF_other_relationship_left_window(c):\n global LF_Threshold\n sc = 0\n word_vectors = get_word_vectors(preprocess(get_left_tokens(c)))\n for ow in other:\n sc=max(sc,get_similarity(word_vectors,ow))\n return (-1,sc) \n\nimport bz2\n\n# Function to remove special characters from text\ndef strip_special(s):\n return ''.join(c for c in s if ord(c) < 128)\n\n# Read in known spouse pairs and save as set of tuples\nwith bz2.BZ2File('data/spouses_dbpedia.csv.bz2', 'rb') as f:\n known_spouses = set(\n tuple(strip_special(x).strip().split(',')) for x in f.readlines()\n )\n# Last name pairs for known spouses\nlast_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)])\n \ndef 
LF_distant_supervision(c):\n p1, p2 = c.person1.get_span(), c.person2.get_span()\n return (1,1) if (p1, p2) in known_spouses or (p2, p1) in known_spouses else (0,0)\n\ndef LF_distant_supervision_last_names(c):\n p1, p2 = c.person1.get_span(), c.person2.get_span()\n p1n, p2n = last_name(p1), last_name(p2)\n return (1,1) if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else (0,1)\n\nimport numpy as np\n\ndef LF_Three_Lists_Left_Window(c):\n global softmax_Threshold\n c1,s1 = LF_husband_wife_left_window(c)\n c2,s2 = LF_family_left_window(c)\n c3,s3 = LF_other_relationship_left_window(c)\n sc = np.array([s1,s2,s3])\n c = [c1,c2,c3]\n sharp_param = 1.5\n prob_sc = np.exp(sc * sharp_param - np.max(sc))\n prob_sc = prob_sc / np.sum(prob_sc)\n #print 'Left:',s1,s2,s3,prob_sc\n \n if s1==s2 or s3==s1:\n return (0,0)\n return c[np.argmax(prob_sc)],1\n\ndef LF_Three_Lists_Between_Words(c):\n global softmax_Threshold\n c1,s1 = LF_husband_wife(c)\n c2,s2 = LF_familial_relationship(c)\n c3,s3 = LF_other_relationship(c)\n sc = np.array([s1,s2,s3])\n c = [c1,c2,c3]\n sharp_param = 1.5\n \n prob_sc = np.exp(sc * sharp_param - np.max(sc))\n prob_sc = prob_sc / np.sum(prob_sc)\n #print 'BW:',s1,s2,s3,prob_sc\n if s1==s2 or s3==s1:\n return (0,0)\n return c[np.argmax(prob_sc)],1\n \nLFs = [LF_distant_supervision, LF_distant_supervision_last_names,LF_same_last_name,\n LF_and_married, LF_Three_Lists_Between_Words,LF_Three_Lists_Left_Window, LF_no_spouse_in_sentence\n ]",
"_____no_output_____"
],
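[
"# Illustrative only (added example): apply a couple of the labeling functions above to a\n# single dev candidate; each returns a (label, score) pair, and the score is rescaled to\n# [0, 1] later inside get_LAMDA.\nc0 = dev_cands[0]\nprint(LF_husband_wife(c0))\nprint(LF_Three_Lists_Between_Words(c0))",
"_____no_output_____"
],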
[
"\nimport numpy as np\nimport math\n\ndef PHI(K,LAMDAi,SCOREi):\n return [K*l*s for (l,s) in zip(LAMDAi,SCOREi)]\n\ndef softmax(THETA,LAMDAi,SCOREi):\n x = []\n for k in [1,-1]:\n product = np.dot(PHI(k,LAMDAi,SCOREi),THETA)\n x.append(product)\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n\ndef function_conf(THETA,LAMDA,P_cap,Confidence):\n s = 0.0\n i = 0\n for LAMDAi in LAMDA:\n s = s + Confidence[i]*np.dot(np.log(softmax(THETA,LAMDAi)),P_cap[i])\n i = i+1\n return -s\n\ndef function(THETA,LAMDA,SCORE,P_cap):\n s = 0.0\n i = 0\n for i in range(len(LAMDA)):\n s = s + np.dot(np.log(softmax(THETA,LAMDA[i],SCORE[i])),P_cap[i])\n i = i+1\n return -s\n\ndef P_K_Given_LAMDAi_THETA(K,THETA,LAMDAi,SCOREi):\n x = softmax(THETA,LAMDAi,SCOREi)\n if(K==1):\n return x[0]\n else:\n return x[1]\n \n\nnp.random.seed(78)\nTHETA = np.random.rand(len(LFs),1)\n\ndef PHIj(j,K,LAMDAi,SCOREi):\n return LAMDAi[j]*K*SCOREi[j]\n\ndef RIGHT(j,LAMDAi,SCOREi,THETA):\n phi = []\n for k in [1,-1]:\n phi.append(PHIj(j,k,LAMDAi,SCOREi))\n x = softmax(THETA,LAMDAi,SCOREi)\n return np.dot(phi,x)\n \n\ndef function_conf_der(THETA,LAMDA,P_cap,Confidence):\n der = []\n for j in range(len(THETA)):\n i = 0\n s = 0.0\n for LAMDAi in LAMDA:\n p = 0\n for K in [1,-1]:\n s = s + Confidence[i]*(PHIj(j,K,LAMDAi)-RIGHT(j,LAMDAi,THETA))*P_cap[i][p]\n p = p+1\n i = i+1\n der.append(-s)\n return np.array(der)\n\ndef function_der(THETA,LAMDA,SCORE,P_cap):\n der = []\n for j in range(len(THETA)):\n i = 0\n s = 0.0\n for index in range(len(LAMDA)):\n p = 0\n for K in [1,-1]:\n s = s + (PHIj(j,K,LAMDA[index],SCORE[index])-RIGHT(j,LAMDA[index],SCORE[index],THETA))*P_cap[i][p]\n p = p+1\n i = i+1\n der.append(-s)\n return np.array(der)\n\n\nimport numpy as np\n\n\ndef get_LAMDA(cands):\n LAMDA = []\n SCORE = []\n for ci in cands:\n L=[]\n S=[]\n P_ik = []\n for LF in LFs:\n #print LF.__name__\n l,s = LF(ci)\n L.append(l)\n S.append((s+1)/2) #to scale scores in [0,1] \n LAMDA.append(L)\n SCORE.append(S) \n return LAMDA,SCORE\n\ndef get_Confidence(LAMDA):\n confidence = []\n for L in LAMDA:\n Total_L = float(len(L))\n No_zeros = L.count(0)\n No_Non_Zeros = Total_L - No_zeros\n confidence.append(No_Non_Zeros/Total_L)\n return confidence \n \ndef get_Initial_P_cap(LAMDA):\n P_cap = []\n for L in LAMDA:\n P_ik = []\n denominator=float(L.count(1)+L.count(-1))\n if(denominator==0):\n denominator=1\n P_ik.append(L.count(1)/denominator)\n P_ik.append(L.count(-1)/denominator)\n P_cap.append(P_ik)\n return P_cap\n #print(np.array(LAMDA))\n #print(np.array(P_cap))append(L)\n #LAMDA=np.array(LAMDA).astype(int)\n #P_cap=np.array(P_cap)\n #print(np.array(LAMDA).shape)\n #print(np.array(P_cap).shape)\n #print(L)\n #print(ci.chemical.get_span(),ci.disease.get_span(),\"No.Os\",L.count(0),\"No.1s\",L.count(1),\"No.-1s\",L.count(-1))\n #print(ci.chemical.get_span(),ci.disease.get_span(),\"P(0):\",L.count(0)/len(L),\" P(1)\",L.count(1)/len(L),\"P(-1)\",L.count(-1)/len(L))\n\n \ndef get_P_cap(LAMDA,SCORE,THETA):\n P_cap = []\n for i in range(len(LAMDA)):\n P_capi = softmax(THETA,LAMDA[i],SCORE[i])\n P_cap.append(P_capi)\n return P_cap\n\n\ndef score(predicted_labels,gold_labels):\n tp =0.0\n tn =0.0\n fp =0.0\n fn =0.0\n for i in range(len(gold_labels)):\n if(predicted_labels[i]==gold_labels[i]):\n if(predicted_labels[i]==1):\n tp=tp+1\n else:\n tn=tn+1\n else:\n if(predicted_labels[i]==1):\n fp=fp+1\n else:\n fn=fn+1\n print(\"tp\",tp,\"tn\",tn,\"fp\",fp,\"fn\",fn)\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n f1score = 
(2*precision*recall)/(precision+recall)\n print(\"precision:\",precision)\n print(\"recall:\",recall)\n print(\"F1 score:\",f1score)\n \n \n \nfrom scipy.optimize import minimize\nimport cPickle as pickle\n\ndef get_marginals(P_cap):\n marginals = []\n for P_capi in P_cap:\n marginals.append(P_capi[0])\n return marginals\n\ndef predict_labels(marginals):\n predicted_labels=[]\n for i in marginals:\n if(i<0.5):\n predicted_labels.append(-1)\n else:\n predicted_labels.append(1)\n return predicted_labels\n\ndef print_details(label,THETA,LAMDA,SCORE):\n print(label)\n P_cap = get_P_cap(LAMDA,SCORE,THETA)\n marginals=get_marginals(P_cap)\n plt.hist(marginals, bins=20)\n plt.show()\n plt.bar(range(0,2796),marginals)\n plt.show()\n predicted_labels=predict_labels(marginals)\n print(len(marginals),len(predicted_labels),len(gold_labels_dev))\n #score(predicted_labels,gold_labels_dev)\n print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='binary')) \n \n \n \ndef train(No_Iter,Use_Confidence=True,theta_file_name=\"THETA\"):\n global THETA\n global dev_LAMDA,dev_SCORE\n LAMDA,SCORE = get_LAMDA(train_cands)\n P_cap = get_Initial_P_cap(LAMDA)\n Confidence = get_Confidence(LAMDA)\n for iteration in range(No_Iter):\n if(Use_Confidence==True):\n res = minimize(function_conf,THETA,args=(LAMDA,P_cap,Confidence), method='BFGS',jac=function_conf_der,options={'disp': True, 'maxiter':20}) #nelder-mead\n else:\n res = minimize(function,THETA,args=(LAMDA,SCORE,P_cap), method='BFGS',jac=function_der,options={'disp': True, 'maxiter':20}) #nelder-mead \n THETA = res.x # new THETA\n print(THETA)\n P_cap = get_P_cap(LAMDA,SCORE,THETA) #new p_cap \n print_details(\"train iteration: \"+str(iteration),THETA,dev_LAMDA,dev_SCORE)\n #score(predicted_labels,gold_labels)\n NP_P_cap = np.array(P_cap)\n np.savetxt('Train_P_cap.txt', NP_P_cap, fmt='%f')\n pickle.dump(NP_P_cap,open(\"Train_P_cap.p\",\"wb\"))\n NP_THETA = np.array(THETA)\n np.savetxt(theta_file_name+'.txt', NP_THETA, fmt='%f') \n pickle.dump( NP_THETA, open( theta_file_name+'.p', \"wb\" )) # save the file as \"outfile_name.npy\" \n\n \ndef test(THETA):\n global dev_LAMDA,dev_SCORE\n P_cap = get_P_cap(dev_LAMDA,dev_SCORE,THETA)\n print_details(\"test:\",THETA,dev_LAMDA,dev_SCORE)\n NP_P_cap = np.array(P_cap)\n np.savetxt('Dev_P_cap.txt', NP_P_cap, fmt='%f')\n pickle.dump(NP_P_cap,open(\"Dev_P_cap.p\",\"wb\"))\n \ndef load_marginals(s):\n marginals = []\n if(s==\"train\"):\n train_P_cap = np.load(\"Train_P_cap.npy\")\n marginals = train_P_cap[:,0]\n return marginals\n\n",
"_____no_output_____"
],
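[
"# Worked sanity check (added, with made-up values): the two class probabilities returned\n# by softmax() should always sum to 1; one lambda/score entry per labeling function.\ndemo_theta = np.ones(len(LFs))\ndemo_lambda = [1, 0, -1, 1, 0, 0, -1]\ndemo_score = [0.9, 0.5, 0.6, 1.0, 0.5, 0.5, 0.8]\np_demo = softmax(demo_theta, demo_lambda, demo_score)\nprint(p_demo, np.sum(p_demo))",
"_____no_output_____"
],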
[
"''' output:\n\n [[[L_x1],[S_x1]],\n [[L_x2],[S_x2]],\n ......\n ......\n ]\n\n'''\ndef get_L_S_Tensor(cands): \n \n L_S = []\n for ci in cands[2:4]:\n L_S_ci=[]\n L=[]\n S=[]\n P_ik = []\n for LF in LFs:\n #print LF.__name__\n l,s = LF(ci)\n L.append(l)\n S.append((s+1)/2) #to scale scores in [0,1] \n L_S_ci.append(L)\n L_S_ci.append(S)\n L_S.append(L_S_ci) \n return L_S\n\ndef get_L_S(cands): # sign gives label abs value gives score\n \n L_S = []\n for ci in cands[2:4]:\n l_s=[]\n for LF in LFs:\n #print LF.__name__\n l,s = LF(ci)\n s= (s+1)/2 #to scale scores in [0,1] \n l_s.append(l*s)\n L_S.append(l_s)\n return L_S\n\ndef get_Initial_P_cap_L_S(L_S):\n P_cap = []\n for L,S in L_S[:2]:\n P_ik = []\n denominator=float(L.count(1)+L.count(-1))\n if(denominator==0):\n denominator=1\n P_ik.append(L.count(1)/denominator)\n P_ik.append(L.count(-1)/denominator)\n P_cap.append(P_ik)\n return P_cap\n\n",
"_____no_output_____"
],
[
"from sklearn.metrics import precision_recall_fscore_support\nimport matplotlib.pyplot as plt\n \n \n\n#L_S = get_L_S_Tensor(train_cands)\n\n\n#dev_L_S = get_L_S_Tensor(dev_cands)\n#train_L_S = get_L_S_Tensor(train_cands)\n\ndev_L_S = get_L_S_Tensor(dev_cands)\ntrain_L_S = get_L_S_Tensor(train_cands)\n\nfor x in train_L_S:\n print(x)\n \npcap= get_Initial_P_cap_L_S(train_L_S) \n\nfor x in pcap:\n print(x)\n\n#L_S = tf.Variable(L_S, tf.float32)\n\n#write_to_file(wordvec_unavailable)",
"[[0, 0, 0, 1, -1, -1, 0], [0.5, 1.0, 0.5, 0.56849847781063756, 1.0, 1.0, 0.5]]\n[[0, 0, 0, 1, -1, -1, -1], [0.5, 1.0, 0.5, 0.5, 1.0, 1.0, 0.875]]\n[0.3333333333333333, 0.6666666666666666]\n[0.25, 0.75]\n"
],
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\nresult_dir = \"./\"\nconfig = projector.ProjectorConfig()\ntf.logging.set_verbosity(tf.logging.INFO)\nsummary_writer = tf.summary.FileWriter(result_dir)\nprojector.visualize_embeddings(summary_writer, config)\n\ntf.reset_default_graph()\nL_S = get_L_S_Tensor(train_cands)\n\nP_cap= get_Initial_P_cap_L_S(train_L_S) \n\n\ndim = 2 #(labels,scores)\n\n_x = tf.placeholder(tf.float64,shape=(dim,len(LFs)))\n_p_cap = tf.placeholder(tf.float64,shape=(2))\n\nalphas = tf.get_variable('alpha', _x.get_shape()[-1],initializer=tf.constant_initializer(0.2),\n dtype=tf.float64)\n\nthetas = tf.get_variable('theta', _x.get_shape()[-1],initializer=tf.constant_initializer(0.0),\n dtype=tf.float64)\n\nprint([n.name for n in tf.get_default_graph().as_graph_def().node])\n\n#for k = 1\n\nk_p1 = tf.ones(shape=(dim,len(LFs)),dtype=tf.float64)\n\nk_n1 = tf.negative(k_p1)\n\n\nl,s = tf.unstack(_x)\n\nprelu_out_s = tf.maximum(tf.subtract(tf.abs(s),alphas,name='subtract'), 0,name='max') \n\n\nmul_L_S = tf.multiply(l,prelu_out_s)\n\nphi_p1 = tf.reduce_sum(tf.multiply(mul_L_S,thetas))\n\nphi_n1 = tf.reduce_sum(tf.multiply(tf.multiply(mul_L_S,k_n1),thetas))\n\nphi_out = tf.stack([phi_p1,phi_n1])\n\nloss = tf.reduce_sum(tf.multiply(tf.log(tf.nn.softmax(phi_out)),_p_cap))\n\n\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n\nfor i in range(1):\n for L_S_i,P_cap_i in zip(L_S,P_cap):\n print(sess.run([loss],feed_dict={_x:L_S_i,_p_cap:P_cap_i}))\n\n\n\n\n\n \n",
"[u'Placeholder', u'Placeholder_1', u'alpha/Initializer/Const', u'alpha', u'alpha/Assign', u'alpha/read', u'theta/Initializer/Const', u'theta', u'theta/Assign', u'theta/read']\n[-0.69314718055994529]\n[-0.69314718055994529]\n"
],
[
"# All LF_Threshold =0.3 and softmax_Threshold=0.3 ,to be run\ntrain(2,Use_Confidence=False,theta_file_name=\"THETA\")\n\ntest(THETA)",
"Warning: Maximum number of iterations has been exceeded.\n Current function value: 330.069609\n Iterations: 20\n Function evaluations: 23\n Gradient evaluations: 23\n[ 3.29293033 0.80240776 3.54276763 3.57746115 2.57487472 2.54625167\n 4.16422647]\ntrain iteration: 0\n"
],
[
"def print_details(label,THETA,LAMDA,SCORE):\n print(label)\n P_cap = get_P_cap(LAMDA,SCORE,THETA)\n marginals=get_marginals(P_cap)\n plt.hist(marginals, bins=20)\n plt.show()\n #plt.bar(range(0,2796),marginals)\n #plt.show()\n predicted_labels=predict_labels(marginals)\n print(len(marginals),len(predicted_labels),len(gold_labels_dev))\n #score(predicted_labels,gold_labels_dev)\n print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='binary')) \n \ndef predict_labels(marginals):\n predicted_labels=[]\n for i in marginals:\n if(i<0.5):\n predicted_labels.append(-1)\n else:\n predicted_labels.append(1)\n return predicted_labels\n\n#import cPickle as pickle\n#THETA = pickle.load( open( \"THETA.p\", \"rb\" ) )\n#test(THETA)\n#LAMDA,SCORE = get_LAMDA(dev_cands)\n#Confidence = get_Confidence(LAMDA)\n\n#P_cap = get_P_cap(LAMDA,SCORE,THETA)\n#marginals=get_marginals(P_cap)\n#plt.hist(marginals, bins=20)\n#plt.show()\n#plt.bar(range(0,888),train_marginals)\n#plt.show()\n\nprint_details(\"dev set\",THETA,dev_LAMDA,dev_SCORE)\npredicted_labels=predict_labels(marginals)\n\n\nsorted_predicted_labels=[x for (y,x) in sorted(zip(Confidence,predicted_labels))] #sort Labels as per Confidence\nsorted_predicted_labels=list(reversed(sorted_predicted_labels))\n\n\nfor i,j in enumerate(reversed(sorted(zip(Confidence,predicted_labels,gold_labels_dev)))):\n if i>20:\n break\n print i,j\n#print(len(marginals),len(predicted_labels),len(gold_labels_dev))\n#no_of_labels=186#int(len(predicted_labels)*0.1) #54 - >0.2 , 108>= 0.15 , 186>= 0.12\n#print(len(sorted_predicted_labels[0:no_of_labels]))\nno_of_labels=2796\nscore(predicted_labels[0:no_of_labels],gold_labels_dev[0:no_of_labels])\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d33c046d1ba872f1fc00c4cc99ae2e00d5b981 | 16,482 | ipynb | Jupyter Notebook | notebooks/12/12_vibrating_building.ipynb | gbrault/resonance | bf66993a98fbbb857511f83bc072449b98f0b4c2 | [
"MIT"
] | 31 | 2017-11-10T16:44:04.000Z | 2022-01-13T12:22:02.000Z | notebooks/12/12_vibrating_building.ipynb | gbrault/resonance | bf66993a98fbbb857511f83bc072449b98f0b4c2 | [
"MIT"
] | 178 | 2017-07-19T20:16:13.000Z | 2020-03-10T04:13:46.000Z | notebooks/12/12_vibrating_building.ipynb | gbrault/resonance | bf66993a98fbbb857511f83bc072449b98f0b4c2 | [
"MIT"
] | 12 | 2018-04-05T22:58:43.000Z | 2021-01-14T04:06:26.000Z | 21.658344 | 402 | 0.50455 | [
[
[
"# Modes of a Vibrating Building\n\nIn this notebook we will find the vibrational modes of a simple model of a building. We will assume that the mass of the floors are much more than the mass of the walls and that the lateral stiffness of the walls can be modeled by a simple linear spring. We will investigate how the building may vibrate under initial conditions that could be caused by a gust of wind and during ground vibration.",
"_____no_output_____"
]
],
[
[
"from IPython.display import YouTubeVideo",
"_____no_output_____"
],
[
"YouTubeVideo('g0cz-oDfUg0', width=600)",
"_____no_output_____"
],
[
"YouTubeVideo('hSwjkG3nv1c', width=600)",
"_____no_output_____"
],
[
"YouTubeVideo('kzVvd4Dk6sw', width=600)",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom resonance.linear_systems import FourStoryBuildingSystem",
"_____no_output_____"
]
],
[
[
"This gives a bit nicer printing of large NumPy arrays.",
"_____no_output_____"
]
],
[
[
"np.set_printoptions(precision=5, linewidth=100, suppress=True)",
"_____no_output_____"
],
[
"%matplotlib notebook",
"_____no_output_____"
]
],
[
[
"# Simulate the four story building",
"_____no_output_____"
]
],
[
[
"sys = FourStoryBuildingSystem()",
"_____no_output_____"
],
[
"sys.constants",
"_____no_output_____"
],
[
"sys.coordinates",
"_____no_output_____"
],
[
"sys.plot_configuration();",
"_____no_output_____"
],
[
"traj = sys.free_response(30, sample_rate=10)",
"_____no_output_____"
],
[
"traj[list(sys.coordinates.keys())].plot(subplots=True);",
"_____no_output_____"
],
[
"sys.animate_configuration(fps=10)",
"_____no_output_____"
],
[
"M, C, K = sys.canonical_coefficients()",
"_____no_output_____"
],
[
"M",
"_____no_output_____"
],
[
"C",
"_____no_output_____"
],
[
"K",
"_____no_output_____"
]
],
[
[
"# Exercise\n\nThe system can be normalized by the mass matrix and transformed into a symmetric eigenvalue problem by introducing the new coordinate vector:\n\n$$\\mathbf{q}=\\mathbf{L}^T\\mathbf{x}$$\n\n$\\mathbf{L}$ is the Cholesky decomposition of the symmetric mass matrix, i.e. $\\mathbf{M}=\\mathbf{L}\\mathbf{L}^T$.\n\nThe equation of motion becomes:\n\n$$\\ddot{\\mathbf{q}} + \\tilde{\\mathbf{K}} \\mathbf{q} = 0$$\n\nCompute $\\tilde{\\mathbf{K}}$.",
"_____no_output_____"
]
],
[
[
"L = np.linalg.cholesky(M)\nL",
"_____no_output_____"
],
[
"M**0.5",
"_____no_output_____"
],
[
"import numpy.linalg as la",
"_____no_output_____"
],
[
"from numpy.linalg import inv",
"_____no_output_____"
],
[
"K_tilde = inv(L) @ K @ inv(L.T)",
"_____no_output_____"
],
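[
"# Sanity check (added): L reconstructs M, and K_tilde is symmetric as claimed above.\nnp.allclose(L @ L.T, M), np.allclose(K_tilde, K_tilde.T)",
"_____no_output_____"
],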
[
"K_tilde",
"_____no_output_____"
]
],
[
[
"Notice that $\\tilde{\\mathbf{K}}$ is symmetric, so we are guaranteed to get real eigenvalues and orthogonal eigenvectors when solving this system.\n\n# Exercise\n\nFind the eigenvalues and eigenvectors. Create the spectral matrix $\\mathbf{\\Lambda}$ and the matrix $P$ which contains the orthonormal eigenvectors of $\\tilde{\\mathbf{K}}$.\n\n$$\n\\mathbf{P} = \\left[ \\mathbf{v}_1, \\ldots, \\mathbf{v}_4 \\right]\n$$",
"_____no_output_____"
]
],
[
[
"evals, evecs = np.linalg.eig(K_tilde)\nevals",
"_____no_output_____"
],
[
"evecs",
"_____no_output_____"
],
[
"Lambda = np.diag(evals)\nLambda",
"_____no_output_____"
],
[
"P = evecs",
"_____no_output_____"
]
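,
[
"# Quick verification (added): the eigenpairs satisfy K_tilde @ P = P @ Lambda.\nnp.allclose(K_tilde @ P, P @ Lambda)",
"_____no_output_____"
]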
],
[
[
"# Exercise\n\nProve that the eigenvectors in $\\mathbf{P}$ are orthonormal.",
"_____no_output_____"
]
],
[
[
"np.dot(P[:, 0], P[:, 1])",
"_____no_output_____"
],
[
"np.linalg.norm(P[:, 0])",
"_____no_output_____"
],
[
"P[:, 0].T @ P[:, 1]",
"_____no_output_____"
],
[
"P[:, 0].T @ P[:, 0]",
"_____no_output_____"
]
],
[
[
"An orthonormal matrix has the property that its transpose multiplied by itself is the identity matrix.",
"_____no_output_____"
]
],
[
[
"P.T @ P",
"_____no_output_____"
]
],
[
[
"# Exercise\n\nFind the natural freqencies of the system in both radians per second and Hertz, store them in an array in the order of the eigenvalues with names `ws` and `fs`.",
"_____no_output_____"
]
],
[
[
"ws = np.sqrt(evals)\nws",
"_____no_output_____"
],
[
"fs = ws / 2 / np.pi\nfs",
"_____no_output_____"
]
],
[
[
"# Exercise\n\nTransform the eigenvectors back into the coordinate system associated with $\\mathbf{x}$. \n\n$$\n\\mathbf{S} = \\left[ \\mathbf{u}_1, \\ldots, \\mathbf{u}_4 \\right]\n$$",
"_____no_output_____"
]
],
[
[
"S = np.linalg.inv(L.T) @ P\nS",
"_____no_output_____"
],
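[
"# Added check: since P is orthonormal and S = inv(L.T) @ P, the modes in S are\n# mass-normalized, i.e. S.T @ M @ S should be (numerically) the identity matrix.\nS.T @ M @ S",
"_____no_output_____"
],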
[
"sys.coordinates",
"_____no_output_____"
]
],
[
[
"# Exercise: visualize the modeshapes\n\nThe eigenmodes (mode shapes) are contained in each column of $\\mathbf{S}$. Create a plot for each mode shape with these specifications:\n\n- The title of each plot should be the frequency of the corresponding modeshape in Hz.\n- The y axis should be made up of the values [0, 3, 6, 9, 12] meters.\n- The x axis should plot the five values. The first should be zero and the remaining values should be the components of the mode shape in order of the component associated with the lowest floor to the highest.\n- Plot lines with small circles at each data point.",
"_____no_output_____"
]
],
[
[
"S[:, 0]",
"_____no_output_____"
],
[
"np.hstack((0, S[:, 0]))",
"_____no_output_____"
],
[
"u1 = S[:, 0]\nu1",
"_____no_output_____"
],
[
"u1[::-1]",
"_____no_output_____"
],
[
"S[:, 2]",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(1, 4)\n\nfor i in range(4):\n axes[i].plot(np.hstack((0, S[:, i])), [0, 3, 6, 9, 12], marker='o')\n axes[i].set_title('{:1.2f} Hz'.format(fs[i]))\n \nplt.tight_layout()",
"_____no_output_____"
],
[
"fs[0]",
"_____no_output_____"
],
[
"S[:, 0]",
"_____no_output_____"
],
[
"sys.coordinates['x1'] = S[0, 2]\nsys.coordinates['x2'] = S[1, 2]\nsys.coordinates['x3'] = S[2, 2]\nsys.coordinates['x4'] = S[3, 2]",
"_____no_output_____"
],
[
"traj = sys.free_response(30, sample_rate=10)",
"_____no_output_____"
],
[
"traj[list(sys.coordinates.keys())].plot(subplots=True)",
"_____no_output_____"
],
[
"sys.animate_configuration(fps=10)",
"_____no_output_____"
]
],
[
[
"# Simulating the trajectory\n\nThe trajectory of building's coordinates can be found with:\n\n$$\n\\mathbf{x}(t) = \\sum_{i=1}^n c_i \\sin(\\omega_i t + \\phi_i) \\mathbf{u}_i\n$$\n\nwhere\n\n$$\n\\phi_i = \\arctan \\frac{\\omega_i \\mathbf{v}_i^T \\mathbf{q}_0}{\\mathbf{v}_i^T \\dot{\\mathbf{q}}_0}\n$$\n\nand\n\n$$\nc_i = \\frac{\\mathbf{v}^T_i \\mathbf{q}_0}{\\sin\\phi_i}\n$$\n\n$c_i$ are the modal participation factors and reflect what proportion of each mode is excited given specific initial conditions. If the initial conditions are the eigenmode, $\\mathbf{u}_i$, the all but the $i$th $c_i$ will be zero.\n\n# Exercise\n\nShow that if $\\mathbf{q}_0 = \\mathbf{v}_i$ then $c_i = 1$ all other modal participation factors are 0. Also, report all of the phase angles, $\\phi_i$, in degrees.",
"_____no_output_____"
]
],
[
[
"for i in range(4):\n x0 = S[:, i]\n xd0 = np.zeros(4)\n print(x0)\n\n q0 = L.T @ x0\n qd0 = L.T @ xd0\n\n phis = np.arctan2(ws * P.T @ q0, P.T @ xd0)\n print(np.rad2deg(phis))\n\n cs = P.T @ q0 / np.sin(phis)\n print(cs)\n print('=' * 40)",
"_____no_output_____"
]
],
[
[
"# Exercise\n\nCreate a function called `simulate()` that returns the trajectories of the coordinates given an array of monotonically increasing time values and the initial conditions of the system.\n\nIt should look like:\n\n```python\ndef simulate(t, x0, xd0):\n \"\"\"Returns the state trajectory.\n \n Parameters\n ==========\n t : ndarray, shape(m,)\n Monotonic values of time.\n x0 : ndarray, shape(n,)\n The initial conditions of each coordinate.\n xd0 : ndarray, shape(n,)\n The initial conditions of each speed.\n \n Returns\n =======\n x : ndarray, shape(m, n)\n The trajectories of each state.\n \n \"\"\"\n \n # your code here\n \n return x\n```",
"_____no_output_____"
]
],
[
[
"def simulate(t, x0, xd0):\n \n q0 = L.T @ x0\n qd0 = L.T @ xd0\n phis = np.arctan2(ws * P.T @ q0, P.T @ xd0)\n cs = P.T @ q0 / np.sin(phis)\n \n x = np.zeros((len(x0), len(t)))\n for ci, wi, phii, ui in zip(cs, ws, phis, S.T):\n x += ci * np.sin(wi * t + phii) * np.tile(ui, (len(t), 1)).T\n \n return x",
"_____no_output_____"
]
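,
[
"# Added sanity check: at t = 0 the simulation should reproduce the initial displacement\n# exactly (zero initial speeds; the x0 values here are arbitrary test numbers).\nx0_chk = np.array([0.001, 0.010, 0.020, 0.025])\nnp.allclose(simulate(np.array([0.0]), x0_chk, np.zeros(4))[:, 0], x0_chk)",
"_____no_output_____"
]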
],
[
[
"# Exercise\n\nUsing the plotting function below, show that the results found here are the same as the simulations from the `FourStoryBuildingSystem` given the same initial conditions.",
"_____no_output_____"
]
],
[
[
"def plot_trajectories(t, x):\n \n fig, axes = plt.subplots(4, 1)\n \n for i, ax in enumerate(axes.flatten()):\n ax.plot(t, x[i])\n ax.set_ylabel(r'$x_{}$ [m]'.format(i + 1))\n ax.set_xlabel('Time [s]')\n \n plt.tight_layout()",
"_____no_output_____"
],
[
"t = np.linspace(0, 50, num=50 * 60)\nx0 = np.array([0.001, 0.010, 0.020, 0.025])\nxd0 = np.zeros(4)\nx = simulate(t, x0, xd0)\nplot_trajectories(t, x)",
"_____no_output_____"
]
],
[
[
"This shows the plot of a single mode:",
"_____no_output_____"
]
],
[
[
"x = simulate(t, S[:, 0], np.zeros(4))\nplot_trajectories(t, x)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3430455bd53cd0b0359caab07a0afa0f0bca1 | 53,916 | ipynb | Jupyter Notebook | 1_data_prep.ipynb | LyaSolis/exBERT | f3276fe8efb83b56eba07ad926f53abb673043bc | [
"Apache-2.0"
] | null | null | null | 1_data_prep.ipynb | LyaSolis/exBERT | f3276fe8efb83b56eba07ad926f53abb673043bc | [
"Apache-2.0"
] | null | null | null | 1_data_prep.ipynb | LyaSolis/exBERT | f3276fe8efb83b56eba07ad926f53abb673043bc | [
"Apache-2.0"
] | null | null | null | 39.383492 | 509 | 0.400642 | [
[
[
"<a href=\"https://colab.research.google.com/github/LyaSolis/exBERT/blob/master/1_data_prep.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"!ls /content/drive/MyDrive/GitHub/bluebert",
"1_data_prep_blue_bert.ipynb elmo\t mt-bluebert requirements.txt\nbert\t\t\t LICENSE.txt NER_output tokenizer\nbluebert\t\t mribert\t README.md\n"
]
],
[
[
"# Preprocess Data\n### We will make 2 types of dataset: for BlueBERT pretraining and finetuning and for exBERT.",
"_____no_output_____"
],
[
"### Input file format for BlueBERT: \n1. One sentence per line. These should ideally be actual sentences, not entire paragraphs or arbitrary spans of text. \n(Because we use the sentence boundaries for the \"next sentence prediction\" task).\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf = pd.read_csv(\"/content/drive/MyDrive/GitHub/exBERT/data/paragrafs.csv\")\ndf.head(1)",
"_____no_output_____"
],
[
"df = df.drop(['Unnamed: 0'], axis = 1)\ndf.head(1)",
"_____no_output_____"
],
[
"import re\n# Testing patterns\ntext = \"Patients with. chronic lymphocytic 's leukemia (. CL Patients with chronic. lymphocytic. leukemia (CL\"\nre.findall(\"\\.(?= [a-z])\", text)",
"_____no_output_____"
],
[
"re.sub(r\"\\.(?= [a-z])\", \".\\\\n\", text)",
"_____no_output_____"
],
[
"df[df['txts'].isna()]",
"_____no_output_____"
],
[
"print(df['txts'][384])",
"nan\n"
],
[
"df = df[df['txts'].notna()]\ndf[df['txts'].isna()]",
"_____no_output_____"
],
[
"sent_list = []\nfor pargr in df['txts']:\n pargr = pargr.strip()\n pargr = re.sub(r\"\\.(?= [A-Z])\", \".\\\\n\", pargr) # Adding new lines to ends of sentences only\n pargr1 = pargr.strip()\n sent_list.append(pargr1)\ndf['sents']=sent_list\ndf.head(1)",
"_____no_output_____"
]
],
[
[
"\n 2. Blank lines between documents. Document boundaries are needed so that the \"next sentence prediction\" task doesn't span between documents.",
"_____no_output_____"
]
],
[
[
"# Adding blank lines between docs\nmask = df['articleids'].ne(df['articleids'].shift(-1))\ndf1 = pd.DataFrame('',index=mask.index[mask] + .5, columns=df.columns)\n\ndf = pd.concat([df, df1]).sort_index().reset_index(drop=True).iloc[:-1]\ndf.tail(3)",
"_____no_output_____"
]
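,
[
"# Minimal synthetic illustration (added) of the blank-row trick above: a blank row is\n# created after the last row of each articleid group and merged back in by index.\ntmp = pd.DataFrame({'articleids': [1, 1, 2], 'txts': ['a', 'b', 'c'], 'sents': ['a', 'b', 'c']})\nm = tmp['articleids'].ne(tmp['articleids'].shift(-1))\npd.concat([tmp, pd.DataFrame('', index=m.index[m] + .5, columns=tmp.columns)]).sort_index().reset_index(drop=True)",
"_____no_output_____"
]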
],
[
[
" Now we will put updated text into text file",
"_____no_output_____"
]
],
[
[
"text_file = []\nfor row in df['sents']:\n row = row.split('\\n')\n for i in row:\n i = i.lstrip()\n text_file.append(i)\n\ntext_file[:1]",
"_____no_output_____"
],
[
"save_file = \"drive/MyDrive/GitHub/exBERT/data/bluebert_train_data.txt\"\n\nwith open(save_file, 'w') as f:\n for item in text_file:\n f.write(\"%s\\n\" % item)",
"_____no_output_____"
]
],
[
[
"Preprocessed PubMed texts corpus used to pre-train the BlueBERT models contains ~4000M words extracted from the PubMed ASCII code version. \n\nOther operations include:\n\n - lowercasing the text\n - removing speical chars \\x00-\\x7F\n - tokenizing the text using the NLTK Treebank tokenizer\n",
"_____no_output_____"
]
],
[
[
"preprocessed_text = []\nfor line in text_file:\n line = line.lower()\n line = re.sub(r'[\\r\\n]+', ' ', line)\n line = re.sub(r'[^\\x00-\\x7F]+', ' ', line)\n preprocessed_text.append(line)\npreprocessed_text[:1]\n",
"_____no_output_____"
],
[
"len(preprocessed_text)",
"_____no_output_____"
],
[
"from nltk import TreebankWordTokenizer",
"_____no_output_____"
],
[
"pubmed_sent_nltk = []\nfor line in preprocessed_text:\n tokenized = TreebankWordTokenizer().tokenize(line)\n sentence = ' '.join(tokenized)\n sentence = re.sub(r\"\\s's\\b\", \"'s\", sentence)\n pubmed_sent_nltk.append(sentence)\n\npubmed_sent_nltk[:1]",
"_____no_output_____"
],
[
"len(pubmed_sent_nltk)",
"_____no_output_____"
],
[
"save_file = \"drive/MyDrive/GitHub/exBERT/data/bluebert_clean_train_data.txt\"\n\nwith open(save_file, 'w') as f:\n for item in pubmed_sent_nltk:\n f.write(\"%s\\n\" % item)",
"_____no_output_____"
]
],
[
[
"## For exBERT text file needs to have paragraphs separated by new lines (no blank lines though).",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"/content/drive/MyDrive/GitHub/exBERT/data/paragrafs.csv\")\ntext = df[\"txts\"]",
"_____no_output_____"
],
[
"text.to_csv(\"/content/drive/MyDrive/GitHub/exBERT/data/exbert_train_data.txt\", sep='\\n', index=False, header=False)",
"_____no_output_____"
]
],
[
[
"Next we will create our new dictionary and tokenizer (notebook 2_get_vocab_and_tokenizer.ipynb)",
"_____no_output_____"
]
]
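,
[
[
"# A minimal sketch (added; not part of the original notebook) of that next step: training\n# a WordPiece vocabulary on the corpus file saved above. Assumes the `tokenizers` library\n# is installed; adjust the paths if your layout differs.\nfrom tokenizers import BertWordPieceTokenizer\ntok = BertWordPieceTokenizer(lowercase=True)\ntok.train(files=['/content/drive/MyDrive/GitHub/exBERT/data/exbert_train_data.txt'], vocab_size=30000)\ntok.save_model('/content/drive/MyDrive/GitHub/exBERT/data/')",
"_____no_output_____"
]
]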
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d0d34a743d6df577adb4ded64ef7fa76e24554fb | 444 | ipynb | Jupyter Notebook | docs/documentation/utilities/friction_factor.ipynb | Gorkowski/particula | a7d48fbd355f6f2b1ff86eb0305514f5e9bdd362 | [
"MIT"
] | 3 | 2021-11-02T00:26:41.000Z | 2021-12-19T15:41:27.000Z | docs/documentation/utilities/friction_factor.ipynb | Gorkowski/particula | a7d48fbd355f6f2b1ff86eb0305514f5e9bdd362 | [
"MIT"
] | 156 | 2021-10-31T15:49:51.000Z | 2022-03-29T01:47:17.000Z | docs/documentation/utilities/friction_factor.ipynb | ngam/particula | a22f2154a1973d0ef19e347db6991d8a3e3b8226 | [
"MIT"
] | 3 | 2021-11-02T03:50:11.000Z | 2022-03-06T20:21:33.000Z | 14.8 | 51 | 0.522523 | [
[
[
"# `friction_factor`\n",
"_____no_output_____"
]
],
[
[
"from particula.util import friction_factor\nhelp(friction_factor)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
d0d34f50d95c741ac659e6fa0e40e39c38a23e4f | 1,039,270 | ipynb | Jupyter Notebook | I2A2_PETR4_Multinomial_Naive_Bayes_+_ARIMA_Trading_System.ipynb | nakanoelio/I2A2-challenge-petr4-trad-sys | bf5b4d46a6b588a325fbe03a28a5a71c82e3d4a9 | [
"MIT"
] | null | null | null | I2A2_PETR4_Multinomial_Naive_Bayes_+_ARIMA_Trading_System.ipynb | nakanoelio/I2A2-challenge-petr4-trad-sys | bf5b4d46a6b588a325fbe03a28a5a71c82e3d4a9 | [
"MIT"
] | null | null | null | I2A2_PETR4_Multinomial_Naive_Bayes_+_ARIMA_Trading_System.ipynb | nakanoelio/I2A2-challenge-petr4-trad-sys | bf5b4d46a6b588a325fbe03a28a5a71c82e3d4a9 | [
"MIT"
] | null | null | null | 186.049051 | 136,274 | 0.736427 | [
[
[
"<a href=\"https://colab.research.google.com/github/nakanoelio/i2a2-challenge-petr4-trad-sys/blob/main/I2A2_PETR4_Multinomial_Naive_Bayes_%2B_ARIMA_Trading_System.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install yfinance\n!pip install --upgrade mplfinance\n \n#Instalação da Biblioteca TA-lib\nurl = 'https://launchpad.net/~mario-mariomedina/+archive/ubuntu/talib/+files'\n!wget $url/libta-lib0_0.4.0-oneiric1_amd64.deb -qO libta.deb\n!wget $url/ta-lib0-dev_0.4.0-oneiric1_amd64.deb -qO ta.deb\n!dpkg -i libta.deb ta.deb\n!pip install ta-lib\n!pip install pandas_ta",
"Requirement already satisfied: yfinance in /usr/local/lib/python3.7/dist-packages (0.1.59)\nRequirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.1.5)\nRequirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.19.5)\nRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.7/dist-packages (from yfinance) (2.23.0)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance) (0.0.9)\nRequirement already satisfied: lxml>=4.5.1 in /usr/local/lib/python3.7/dist-packages (from yfinance) (4.6.3)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2.8.1)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2020.12.5)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (1.24.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.24->yfinance) (1.15.0)\nRequirement already up-to-date: mplfinance in /usr/local/lib/python3.7/dist-packages (0.12.7a17)\nRequirement already satisfied, skipping upgrade: pandas in /usr/local/lib/python3.7/dist-packages (from mplfinance) (1.1.5)\nRequirement already satisfied, skipping upgrade: matplotlib in /usr/local/lib/python3.7/dist-packages (from mplfinance) (3.2.2)\nRequirement already satisfied, skipping upgrade: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->mplfinance) (2018.9)\nRequirement already satisfied, skipping upgrade: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->mplfinance) (2.8.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from pandas->mplfinance) (1.19.5)\nRequirement already satisfied, skipping upgrade: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mplfinance) (0.10.0)\nRequirement already satisfied, skipping upgrade: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mplfinance) (2.4.7)\nRequirement already satisfied, skipping upgrade: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->mplfinance) (1.3.1)\nRequirement already satisfied, skipping upgrade: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->mplfinance) (1.15.0)\n(Reading database ... 
160798 files and directories currently installed.)\nPreparing to unpack libta.deb ...\nUnpacking libta-lib0 (0.4.0-oneiric1) over (0.4.0-oneiric1) ...\nPreparing to unpack ta.deb ...\nUnpacking ta-lib0-dev (0.4.0-oneiric1) over (0.4.0-oneiric1) ...\nSetting up libta-lib0 (0.4.0-oneiric1) ...\nSetting up ta-lib0-dev (0.4.0-oneiric1) ...\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.7/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\nRequirement already satisfied: ta-lib in /usr/local/lib/python3.7/dist-packages (0.4.20)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from ta-lib) (1.19.5)\nRequirement already satisfied: pandas_ta in /usr/local/lib/python3.7/dist-packages (0.2.45b0)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from pandas_ta) (1.1.5)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->pandas_ta) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->pandas_ta) (2.8.1)\nRequirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from pandas->pandas_ta) (1.19.5)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->pandas_ta) (1.15.0)\n"
],
[
"import numpy as np\nimport pandas as pd\nimport scipy as sp\nimport seaborn as sn\nimport matplotlib.pyplot as plt\n\nimport yfinance as yf\n\nimport talib as ta\nimport pandas_ta as pd_ta\n\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB, BernoulliNB, CategoricalNB\n\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nfrom statsmodels.tsa.stattools import adfuller, kpss\nfrom statsmodels.tsa.arima_model import ARIMAResults\n\nfrom tqdm import tqdm",
"/usr/local/lib/python3.7/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"stock_ticker = 'PETR4.SA'\nstart_date = '2016-01-26'\nend_date = '2021-5-27'\n\nyf_petr4 = yf.Ticker(stock_ticker)\ndf_petr4 = yf_petr4.history(start=start_date, end=end_date)\n\nstock_ticker = '^BVSP'\nyf_ibov = yf.Ticker(stock_ticker)\ndf_ibov = yf_ibov.history(start=start_date, end=end_date)",
"_____no_output_____"
],
[
"df_petr4.head(20)",
"_____no_output_____"
],
[
"\ndef arima_for(df):\n list_arima = []\n for i in tqdm(range(df[\"Close\"].shape[0]-1)):\n try:\n arima_model = ARIMA(df[\"Close\"].iloc[:i].to_list(), order=(1, 1, 1))\n arima_model_fit = arima_model.fit()\n a = arima_model_fit.forecast()[0].item()\n b = df[\"Close\"].iloc[i+1]\n list_arima.append(a)\n except:\n a = df[\"Close\"].iloc[i+1]\n b = df[\"Close\"].iloc[i+1]\n list_arima.append(a)\n list_arima.append(df[\"Close\"].iloc[i])\n return list_arima\n\narima_forecast = np.array(arima_for(df_petr4))",
"\r 0%| | 0/1323 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/ar_model.py:763: RuntimeWarning: divide by zero encountered in log\n return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs\n 1%|▏ | 18/1323 [00:00<00:39, 33.31it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/tsatools.py:668: RuntimeWarning: overflow encountered in exp\n newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()\n/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/tsatools.py:668: RuntimeWarning: invalid value encountered in true_divide\n newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()\n/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/tsatools.py:669: RuntimeWarning: overflow encountered in exp\n tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()\n/usr/local/lib/python3.7/dist-packages/statsmodels/tsa/tsatools.py:669: RuntimeWarning: invalid value encountered in true_divide\n tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()\n 2%|▏ | 21/1323 [00:00<01:10, 18.55it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 2%|▏ | 28/1323 [00:00<00:59, 21.84it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 2%|▏ | 31/1323 [00:01<01:21, 15.77it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 34/1323 [00:01<01:33, 13.78it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 37/1323 [00:01<01:30, 14.20it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 39/1323 [00:01<01:44, 12.28it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 41/1323 [00:02<01:50, 11.59it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params 
available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 43/1323 [00:02<01:58, 10.78it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n 3%|▎ | 45/1323 [00:02<02:08, 9.96it/s]/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n/usr/local/lib/python3.7/dist-packages/statsmodels/base/model.py:492: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n 'available', HessianInversionWarning)\n100%|██████████| 1323/1323 [03:10<00:00, 6.93it/s]\n"
],
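[
"# Added sanity check: arima_for() emits one value per row (n-1 one-step-ahead forecasts\n# plus the final close appended at the end), so the lengths should match.\nprint(len(arima_forecast), df_petr4.shape[0])",
"_____no_output_____"
],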
[
"arima_forecast",
"_____no_output_____"
],
[
"df_petr4",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#Calculo dos Indicadores\ndef indicadores(stock_data):\n \n data = stock_data.copy()\n data['W%R'] = ta.WILLR(data['High'], data['Low'], data['Close'], timeperiod=14) #Retorna valor do indicador Williams %R\n data['MACD'], data['Signal-line'], data['Histograma_MACD'] = ta.MACD(data['Close'], fastperiod=12, slowperiod=26, signalperiod=9) #Valores do indicador MACD\n data.loc[:, 'Momento_MACD']=np.where(data['Histograma_MACD']>0, 1, 0) #Retorna 1 para compra pelo MACD (momento positivo), 0 para venda\n data['Tendencia_MACD']=np.where(data['Histograma_MACD'].diff()>0, 1, 0) #Derivada, sinaliza reversao de tendencia no histograma MACD, 1 para compra, 0 para venda\n data.loc[:, 'W%R_Compra']= np.where(data['W%R']<-80, 1, 0) # Retorna 1 para sinal de compra, caso Williams %R < -80\n data.loc[:, 'W%R_Venda']= np.where(data['W%R']>-20, 1, 0) # Retorna 1 para sinal de venda, caso Williams %R > -20\n \n data['Hammer']=ta.CDLHAMMER(data['Open'],data['High'], data['Low'], data['Close'])/100 #Sinal de compra pra martelo\n data['Shooting_star'] = ta.CDLSHOOTINGSTAR(data['Open'],data['High'], data['Low'], data['Close'])/-100 #Sinal de venda 'estrela cadente'\n data[\"EMA12\"] = ta.EMA(data[\"Close\"], timeperiod=12)\n data[\"EMA26\"] = ta.EMA(data[\"Close\"], timeperiod=26)\n\n \n #return data.drop([\"Open\",\"Close\",\"High\",\"Low\",\"Volume\",\"Dividends\",\"Stock Splits\"],axis=\"columns\")\n return data[['Momento_MACD','Tendencia_MACD','W%R_Compra','W%R_Venda']]#,\"EMA12\",\"EMA26\",'W%R','MACD','Hammer','Shooting_star','Momento_MACD','Tendencia_MACD','W%R_Compra','W%R_Venda']]",
"_____no_output_____"
],
[
"def isSupport(df,i):\n #Estamos utilizando dados futuros!\n #support = df['Low'][i] <= df['Low'][i-1] and df['Low'][i] <= df['Low'][i+1] and df['Low'][i] < df['Low'][i+2] and df['Low'][i] < df['Low'][i-2]\n #support = df['Low'][i-1] <= df['Low'][i-3] and df['Low'][i-2] <= df['Low'][i-1] and df['Low'][i-2] < df['Low'][i] and df['Low'][i-2] < df['Low'][i-4]\n support = df['Low'][i] <= df['Low'][i-2] and df['Low'][i-1] <= df['Low'][i-2] and np.abs(df['Low'][i]-df['Low'][i-1]) < np.abs(df['Low'][i-1]-df['Low'][i-2])\n \n return support\n\ndef isResistance(df,i):\n #Estamos utilizando dados futuros!\n #resistance = df['High'][i] > df['High'][i-1] and df['High'][i] > df['High'][i+1] and df['High'][i] > df['High'][i+2] and df['High'][i] > df['High'][i-2]\n #resistance = df['High'][i-2] > df['High'][i-3] and df['High'][i-2] > df['High'][i-1] and df['High'][i-2] > df['High'][i] and df['High'][i-2] > df['High'][i-4]\n resistance = df['High'][i] > df['High'][i-2] and df['High'][i-1] > df['High'][i-2] and np.abs(df['High'][i]-df['High'][i-1]) < np.abs(df['High'][i-1]-df['High'][i-2])\n \n return resistance ",
"_____no_output_____"
],
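[
"# Illustrative only (added): integer positions of the first few bars flagged as support\n# or resistance by the rules above.\nsup_idx = [i for i in range(2, df_petr4.shape[0]) if isSupport(df_petr4, i)][:5]\nres_idx = [i for i in range(2, df_petr4.shape[0]) if isResistance(df_petr4, i)][:5]\nprint(sup_idx, res_idx)",
"_____no_output_____"
],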
[
"def sup_res(df_data):\n s = np.mean(df_data['High'] - df_data['Low'])\n levels = []\n support = [0,0]\n resistance = [0,0]\n for i in range(2,df_data.shape[0]-2):\n if isSupport(df_data,i):\n l = df_data['Low'][i]\n support.append(1)\n resistance.append(0)\n levels.append((i,l))\n #if isFarFromLevel(l,levels,s):\n #support.append(1)\n #resistance.append(0)\n #levels.append((i,l))\n #else:\n #support.append(0)\n #resistance.append(0)\n elif isResistance(df_data,i):\n l = df_data['High'][i]\n support.append(0)\n resistance.append(1)\n #if isFarFromLevel(l,levels,s):\n #resistance.append(1)\n #support.append(0)\n #levels.append((i,l))\n #else:\n #resistance.append(0)\n #support.append(0)\n else:\n resistance.append(0)\n support.append(0)\n support.extend([0,0])\n resistance.extend([0,0])\n\n return support, resistance",
"_____no_output_____"
],
[
"def feat_gen(data_f, p_window,return_period):\n \n data_frame = data_f.copy()\n\n #data_frame[\"Close_Return\"] = data_frame[\"Close\"].diff()\n data_frame[\"Close_Return_Rel\"] = data_frame[\"Close\"].pct_change()\n \n #data_frame[\"Close_Return\"].fillna(0,inplace=True)\n #data_frame[\"Close_Return_Rel\"].fillna(0,inplace=True)\n tresh = data_frame[\"Close_Return_Rel\"].std()*0.05*return_period\n #tresh = 0\n data_frame[\"Expected_Close_Return\"] = data_frame[\"Close_Return_Rel\"].rolling(return_period).sum().apply(lambda x: 2 if x > tresh else (1 if x <= tresh and x >= -tresh else 0))\n #data_frame.loc[data_frame[\"Close_Return_Rel\"].rolling(return_period).sum() > tresh, \"Expected_Close_Return\"] = 2\n #data_frame.loc[data_frame[\"Close_Return_Rel\"].rolling(return_period).sum() <= tresh, \"Expected_Close_Return\"] = 1\n #data_frame.loc[data_frame[\"Close_Return_Rel\"].rolling(return_period).sum() < -tresh, \"Expected_Close_Return\"] = 0\n #data_frame.loc[data_frame[\"Close_Return_Rel\"].rolling(return_period).sum() >= tresh, f\"Expected_Close_Return\"] = 1\n #data_frame.loc[data_frame[\"Close_Return_Rel\"].rolling(return_period).sum() < tresh, f\"Expected_Close_Return\"] = 0\n \n #new_col_names = []\n \n data_frame['ARIMA_forecast'] = arima_forecast\n data_frame[\"ARIMA_forecast_Ret\"] = (data_frame[\"ARIMA_forecast\"]-data_frame[\"Close\"])/data_frame[\"Close\"]\n data_frame[\"ARIMA_forecast_Ret_Disc\"] = data_frame[\"ARIMA_forecast_Ret\"].apply(lambda x: 2 if x > tresh else (1 if x <= tresh and x >= -tresh else 0))\n \n for i in range(0,p_window):\n \n data_frame[f'Return_Lag_{i}period'] = data_frame[\"Close_Return_Rel\"].shift(periods=i).apply(lambda x: 2 if x > tresh else (1 if x <= tresh and x >= -tresh else 0))\n #data_frame[f'Return_Lag_{i}period'] = data_frame[\"Close_Return_Rel\"].rolling(i+1).sum()\n \n data_frame[\"Expected_Close_Return\"] = data_frame[\"Expected_Close_Return\"].shift(-return_period)\n \n\n return data_frame.drop([\"Open\",\"Close\",\"High\",\"Low\",\"Volume\",\"Dividends\",\"Stock Splits\",\"Close_Return_Rel\",\"ARIMA_forecast_Ret\"],axis=\"columns\").fillna(0)#\"Close_Return_Rel\"\n #return data_frame[[\"Expected_Close_Return\"].fillna(0)",
"_____no_output_____"
],
[
"def calc_beta(data_frame_asset,data_frame_bench, beta_window):\n data_frame_beta = pd.concat([data_frame_bench[\"Close\"].pct_change(), data_frame_asset[\"Close\"].pct_change()],axis=1,ignore_index=True)\n\n data_frame_beta.columns=[\"Close_IBOV\",\"Close_PETR4\"]\n\n data_frame_beta[\"Beta\"] = data_frame_beta[\"Close_PETR4\"].rolling(beta_window).cov(data_frame_beta[\"Close_IBOV\"].rolling(beta_window))/data_frame_beta[\"Close_IBOV\"].rolling(beta_window).var()\n\n data_frame_beta[\"Beta_expected_PETR4\"] = data_frame_beta[\"Close_IBOV\"]*data_frame_beta[\"Beta\"]\n\n data_frame_beta[\"PETR4_Excess_Variat\"] = (data_frame_beta[\"Close_PETR4\"] - data_frame_beta[\"Beta_expected_PETR4\"])#/data_frame_beta[\"Beta_expected_PETR4\"]\n #data_frame_beta[\"PETR4_Excess_Variat\"].describe()\n var_tolerance = 1#data_frame_beta[\"Close_IBOV\"].std()#/data_frame_beta[\"Close_IBOV\"].mean()\n\n data_frame_beta[\"PETR4_Excess_Variat_Disc\"] = data_frame_beta[\"PETR4_Excess_Variat\"].apply(lambda x: 2 if x > var_tolerance else (1 if x <= var_tolerance and x >= -var_tolerance else 0))\n \n \n #data_frame_beta.loc[data_frame_beta[\"PETR4_Excess_Variat\"] > var_tolerance, \"PETR4_Excess_Variat_Disc\"] = 1\n #data_frame_beta.loc[data_frame_beta[\"PETR4_Excess_Variat\"] <= var_tolerance , \"PETR4_Excess_Variat_Disc\"] = 0\n #data_frame_beta.loc[data_frame_beta[\"PETR4_Excess_Variat\"] < -var_tolerance, \"PETR4_Excess_Variat_Disc\"] = 1\n \n\n \n\n #return data_frame_beta.drop([\"Close_IBOV\",\"Close_PETR4\",], axis=\"columns\").fillna(0)\n \n #return data_frame_beta[\"PETR4_Excess_Variat_Disc\"].fillna(0)\n return data_frame_beta[\"PETR4_Excess_Variat_Disc\"].fillna(0)",
"_____no_output_____"
],
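[
"# Added example: class balance of the discretized beta-excess feature\n# (2 = above tolerance, 1 = within tolerance, 0 = below).\ncalc_beta(df_petr4, df_ibov, 30).value_counts()",
"_____no_output_____"
],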
[
"def gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window):\n\n #df_feat = data_frame_orig\n df_feat = pd.concat([data_frame_orig, feat_gen(data_frame_orig, p_window, return_period)],axis=1)\n df_feat = pd.concat([df_feat,indicadores(data_frame_orig)],axis=1)\n\n #sup,res = sup_res(data_frame_orig)\n #df_feat[\"Support\"] = sup\n #df_feat[\"Resistance\"] = res\n df_feat = pd.concat([df_feat,calc_beta(data_frame_orig,data_frame_bench,beta_window)],axis=1)\n #df_feat = df_feat.reindex(columns=(list([col for col in df_feat.columns if col != \"Expected_Close_Return\"]+[\"Expected_Close_Return\"])))\n df_feat = df_feat.drop([\"Stock Splits\",\"Dividends\",\"Volume\",'Open','High','Low'],axis=1)#\"Close\"\n\n return df_feat",
"_____no_output_____"
],
[
"def train_eval(df,train_init_date,train_end_date):#,test_end_date):\n \n dia_ini_train_idx = df.index.get_loc(train_init_date)\n dia_fin_train_idx = df.index.get_loc(train_end_date)\n #dia_fin_test_idx = df.index.get_loc(test_end_date)\n y_label_idx = df.columns.get_loc(\"Expected_Close_Return\")\n X_tr = df.iloc[dia_ini_train_idx:dia_fin_train_idx].drop('Expected_Close_Return',axis='columns')\n X_ts = df.iloc[dia_fin_train_idx].drop('Expected_Close_Return')#,axis='columns')\n #print(X_ts)\n y_tr = df.iloc[dia_ini_train_idx:dia_fin_train_idx,y_label_idx]\n y_ts = df.iloc[dia_fin_train_idx,y_label_idx]\n\n return X_tr, X_ts, y_tr, y_ts",
"_____no_output_____"
],
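[
"# Illustration of the split train_eval performs, on a tiny date-indexed frame\n# (hypothetical dates and labels): rows from the start date up to, but not\n# including, the end date are training data; the end-date row is the test point.\nimport pandas as pd\n\nidx = pd.date_range(\"2021-01-01\", periods=5, freq=\"D\")\ntoy_df = pd.DataFrame({\"feat\": range(5), \"Expected_Close_Return\": [0, 1, 2, 1, 0]}, index=idx)\n\nX_tr, X_ts, y_tr, y_ts = train_eval(toy_df, \"2021-01-01\", \"2021-01-04\")\nprint(X_tr)  # rows 2021-01-01 .. 2021-01-03\nprint(X_ts)  # the single 2021-01-04 row, as a Series\nprint(y_ts)  # its label: 1",
"_____no_output_____"
],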
[
"def run_model(X,y,model_type):\n \n nb_model = model_type\n nb_model.fit(X, y)\n\n \n \n #np.column_stack((y_test.to_list(),nb_model.predict(X_test)))\n #print(nb_model.predict_proba(X_test)[:10])\n #print(f'test_score = {nb_model.score(X_test,y_test)}')\n\n return nb_model\n\ndef meas_acc(X,y,nb_model):\n y_pred = nb_model.predict(X)\n print(\"Number of mislabeled points out of a total %d points : %d\" % (X.shape[0], (y != y_pred).sum()))\n print(\"Train Accuracy: %f\"% metrics.balanced_accuracy_score(y, y_pred))\n cf_train2 = metrics.confusion_matrix(y, y_pred, normalize=\"all\") \n sn.heatmap(cf_train2,linewidths=.5,annot=True,cmap=\"YlGnBu\",cbar=False,square=True,xticklabels=(1,2,3), yticklabels=(1,2,3))\n ",
"_____no_output_____"
],
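[
"# Why balanced accuracy: the three return classes are not equally frequent, so\n# plain accuracy can look good by over-predicting the majority class; balanced\n# accuracy averages per-class recall instead. A tiny hand-checkable example:\nfrom sklearn import metrics\n\ny_true = [0, 0, 0, 0, 2, 2]  # majority class 0, minority class 2\ny_pred = [0, 0, 0, 0, 0, 2]  # one minority point missed\nprint(metrics.accuracy_score(y_true, y_pred))           # 5/6 ~ 0.833\nprint(metrics.balanced_accuracy_score(y_true, y_pred))  # (4/4 + 1/2)/2 = 0.75",
"_____no_output_____"
],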
[
"data_frame_orig = df_petr4\np_window = 120\nbeta_window = 30\ndata_frame_bench = df_ibov\n\nreturn_period = 1\ndf_petr4_1 = gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window).fillna(0)\n\nreturn_period = 2\ndf_petr4_2 = gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window).fillna(0)\n\nreturn_period = 3\ndf_petr4_3 = gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window).fillna(0)\n\nreturn_period = 5\ndf_petr4_5 = gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window).fillna(0)\n\nreturn_period = 10\ndf_petr4_10 = gen_feat_data(data_frame_orig,p_window,return_period,data_frame_bench,beta_window).fillna(0)",
"_____no_output_____"
],
[
"df_petr4_1.tail(5)",
"_____no_output_____"
],
[
"X,_,y,_ = train_eval(df_petr4_1,\"2016-01-26 00:00:00\",\"2018-01-26 00:00:00\")\ngnb1 = run_model(X,y, MultinomialNB())\nmeas_acc(X,y,gnb1)",
"Number of mislabeled points out of a total 506 points : 147\nTrain Accuracy: 0.607579\n"
],
[
"X,_,y,_ = train_eval(df_petr4_2,\"2016-01-26 00:00:00\",\"2018-01-26 00:00:00\")\ngnb2 = run_model(X,y, MultinomialNB())\nmeas_acc(X,y,gnb2)",
"Number of mislabeled points out of a total 506 points : 164\nTrain Accuracy: 0.573948\n"
],
[
"X,_,y,_ = train_eval(df_petr4_3,\"2016-01-26 00:00:00\",\"2018-01-26 00:00:00\")\ngnb3 = run_model(X,y, MultinomialNB())\nmeas_acc(X,y,gnb3)",
"Number of mislabeled points out of a total 506 points : 159\nTrain Accuracy: 0.617196\n"
],
[
"X,_,y,_ = train_eval(df_petr4_5,\"2016-01-26 00:00:00\",\"2018-01-26 00:00:00\")\ngnb5 = run_model(X,y, MultinomialNB())\nmeas_acc(X,y,gnb5)",
"Number of mislabeled points out of a total 506 points : 154\nTrain Accuracy: 0.577378\n"
],
[
"X,_,y,_ = train_eval(df_petr4_10,\"2016-01-26 00:00:00\",\"2018-01-26 00:00:00\")\ngnb10 = run_model(X,y, MultinomialNB())\nmeas_acc(X,y,gnb10)",
"Number of mislabeled points out of a total 506 points : 166\nTrain Accuracy: 0.525812\n"
],
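[
"# Recap of the in-sample balanced accuracies printed above, per horizon.\ntrain_bal_acc = {1: 0.607579, 2: 0.573948, 3: 0.617196, 5: 0.577378, 10: 0.525812}\nfor period, acc in sorted(train_bal_acc.items()):\n    print(f\"return_period={period:>2}: balanced accuracy {acc:.4f}\")\n# In-sample skill is weakest for the 10-day horizon; the walk-forward\n# evaluation below is the number that actually matters.",
"_____no_output_____"
],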
[
"def rolling_results(df,dia_ini_train_idx,dia_ini_test_idx,model):\n results = []\n for i in df.iloc[dia_ini_train_idx:dia_ini_test_idx].index:\n X,X_test,y,y_test = train_eval(df,\"2016-01-26 00:00:00\",i)\n nb_model = run_model(X,y,model)\n y_predict = nb_model.predict(X_test.to_numpy().reshape(1, -1)).item()\n y_predictX = nb_model.predict(X)\n y_prob = nb_model.predict_proba(X_test.to_numpy().reshape(1, -1))\n acc = metrics.balanced_accuracy_score(y, y_predictX) \n results.append([i,y_test,y_predict]+list(y_prob[0])+[acc])\n \n return results, nb_model",
"_____no_output_____"
],
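[
"# Each row appended by rolling_results is, in order:\n#   [date, true class, predicted class, P(class 0), P(class 1), P(class 2),\n#    in-sample balanced accuracy of that day's refit].\n# A sketch (column names are my own) of turning such a list into a frame and\n# scoring the walk-forward predictions out of sample:\nimport pandas as pd\nfrom sklearn import metrics\n\ndef summarize(res):\n    cols = [\"date\", \"y_true\", \"y_pred\", \"p0\", \"p1\", \"p2\", \"train_bal_acc\"]\n    frame = pd.DataFrame(res, columns=cols).set_index(\"date\")\n    oos = metrics.balanced_accuracy_score(frame[\"y_true\"], frame[\"y_pred\"])\n    return frame, oos\n\n# usage, once the cell below has produced res1:\n# frame1, oos1 = summarize(res1); print(oos1)",
"_____no_output_____"
],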
[
"dia_ini_test_idx = df_petr4_1.index.get_loc(\"2018-01-26 00:00:00\")\ndia_end_test_idx = df_petr4_1.index.get_loc(\"2021-05-26 00:00:00\")",
"_____no_output_____"
],
[
"res1,_ = rolling_results(df_petr4_1,dia_ini_test_idx,dia_end_test_idx,BernoulliNB())\nres2,_ = rolling_results(df_petr4_2,dia_ini_test_idx,dia_end_test_idx,BernoulliNB())\nres3,_ = rolling_results(df_petr4_2,dia_ini_test_idx,dia_end_test_idx,BernoulliNB())\nres5,_ = rolling_results(df_petr4_5,dia_ini_test_idx,dia_end_test_idx,BernoulliNB())\nres10,_ = rolling_results(df_petr4_10,dia_ini_test_idx,dia_end_test_idx,BernoulliNB())\nprint(res1)\nprint(res2)\nprint(res5)\nprint(res10)",
"[[Timestamp('2018-01-26 00:00:00'), 0.0, 1.0, 0.0022171427157754975, 0.9917905304093151, 0.00599232687491042, 0.6799616156895328], [Timestamp('2018-01-29 00:00:00'), 0.0, 1.0, 0.013544639392311495, 0.9730907256690101, 0.013364634938679327, 0.6793454510619427], [Timestamp('2018-01-30 00:00:00'), 2.0, 1.0, 0.00574618014733271, 0.990639634836504, 0.0036141850161670454, 0.6787226591615904], [Timestamp('2018-01-31 00:00:00'), 2.0, 0.0, 0.5158975697552445, 0.373583475542249, 0.11051895470251118, 0.6770149210738564], [Timestamp('2018-02-01 00:00:00'), 0.0, 1.0, 0.019163394128120993, 0.9741370015766715, 0.0066996042952068, 0.6731902356902357], [Timestamp('2018-02-02 00:00:00'), 0.0, 1.0, 0.009868153288069588, 0.9408776110465044, 0.049254235665422225, 0.6721835186031165], [Timestamp('2018-02-05 00:00:00'), 2.0, 1.0, 0.0870225012153782, 0.7403858824009099, 0.17259161638370885, 0.6732575757575757], [Timestamp('2018-02-06 00:00:00'), 0.0, 1.0, 0.07314773914615176, 0.9011232330016251, 0.02572902785221865, 0.6703197064989518], [Timestamp('2018-02-07 00:00:00'), 0.0, 1.0, 0.012975736440053951, 0.9269969978461343, 0.060027265713815905, 0.6640469038455521], [Timestamp('2018-02-08 00:00:00'), 0.0, 1.0, 0.007745245220567362, 0.956930598826468, 0.035324155952964495, 0.6614115656848704], [Timestamp('2018-02-09 00:00:00'), 2.0, 1.0, 0.023144150656797684, 0.842543271916119, 0.13431257742707742, 0.658802191446954], [Timestamp('2018-02-15 00:00:00'), 1.0, 0.0, 0.6994739065349048, 0.23438398666640028, 0.06614210679870179, 0.6653739233140322], [Timestamp('2018-02-16 00:00:00'), 2.0, 1.0, 0.048004128569072796, 0.9459660192150838, 0.0060298522158414155, 0.6567959307134833], [Timestamp('2018-02-19 00:00:00'), 2.0, 1.0, 0.07516854086853889, 0.8361679936519409, 0.08866346547951322, 0.6532826226189817], [Timestamp('2018-02-20 00:00:00'), 0.0, 1.0, 0.06565829127944296, 0.9314913825863076, 0.002850326134250811, 0.6507881073170391], [Timestamp('2018-02-21 00:00:00'), 2.0, 1.0, 0.009636368927745873, 0.9499904400268615, 0.040373191045391575, 0.6481801675894238], [Timestamp('2018-02-22 00:00:00'), 2.0, 1.0, 0.10569527345348428, 0.8747371805903119, 0.019567545956203147, 0.6502071708485129], [Timestamp('2018-02-23 00:00:00'), 2.0, 0.0, 0.4971059656404256, 0.3926772373932423, 0.11021679696633536, 0.6509966949742859], [Timestamp('2018-02-26 00:00:00'), 1.0, 1.0, 0.01252234092203072, 0.9758475045436232, 0.011630154534343457, 0.6522145143420222], [Timestamp('2018-02-27 00:00:00'), 0.0, 1.0, 0.02690633299317419, 0.9342213115058106, 0.0388723555010212, 0.6599587584111135], [Timestamp('2018-02-28 00:00:00'), 0.0, 1.0, 0.002977994222500154, 0.9944245359171766, 0.002597469860329071, 0.6626742267422673], [Timestamp('2018-03-01 00:00:00'), 2.0, 1.0, 0.01645456096692003, 0.7865049477218913, 0.19704049131118256, 0.6601089098269624], [Timestamp('2018-03-02 00:00:00'), 2.0, 1.0, 0.39721139358435337, 0.4122323608930794, 0.19055624552257408, 0.6608904435560632], [Timestamp('2018-03-05 00:00:00'), 0.0, 1.0, 0.002094776486332845, 0.9972324035256228, 0.0006728199880448683, 0.658441860189433], [Timestamp('2018-03-06 00:00:00'), 0.0, 1.0, 0.025514358449629394, 0.8539531290105745, 0.12053251253979815, 0.6579009396400701], [Timestamp('2018-03-07 00:00:00'), 1.0, 1.0, 0.28625847175812236, 0.3919227903256841, 0.321818737916187, 0.6581929181929181], [Timestamp('2018-03-08 00:00:00'), 2.0, 1.0, 0.015243987170042095, 0.9327803433559873, 0.0519756694739678, 0.6539942900237018], [Timestamp('2018-03-09 00:00:00'), 0.0, 1.0, 0.021805704540193177, 
0.9732155676585954, 0.004978727801209375, 0.6503552384831903], [Timestamp('2018-03-12 00:00:00'), 0.0, 1.0, 0.15689977172352967, 0.7865737815519503, 0.056526446724521014, 0.6485527120428026], [Timestamp('2018-03-13 00:00:00'), 2.0, 1.0, 0.1292363215499539, 0.7914520942104893, 0.07931158423955878, 0.6424195955754565], [Timestamp('2018-03-14 00:00:00'), 0.0, 0.0, 0.5577663902171537, 0.3549859022303504, 0.08724770755250298, 0.6456260079789491], [Timestamp('2018-03-15 00:00:00'), 2.0, 1.0, 0.09212620036098514, 0.6507696614923142, 0.25710413814670635, 0.6478978803930017], [Timestamp('2018-03-16 00:00:00'), 0.0, 1.0, 0.012576626141128663, 0.9761987519505011, 0.011224621908363786, 0.6522927668216817], [Timestamp('2018-03-19 00:00:00'), 2.0, 1.0, 0.3635366022661894, 0.48439055654267393, 0.15207284119113237, 0.6426863334888235], [Timestamp('2018-03-20 00:00:00'), 2.0, 1.0, 0.4475516467133373, 0.4993061408436634, 0.05314221244299302, 0.6412641300191034], [Timestamp('2018-03-21 00:00:00'), 0.0, 0.0, 0.6087472764664077, 0.15986172894663442, 0.2313909945869549, 0.6361153030960359], [Timestamp('2018-03-22 00:00:00'), 1.0, 1.0, 0.08053333311649279, 0.8495851043454057, 0.06988156253810195, 0.6358012181803853], [Timestamp('2018-03-23 00:00:00'), 2.0, 0.0, 0.5489365180340101, 0.18479236962164108, 0.2662711123443437, 0.6280810671073108], [Timestamp('2018-03-26 00:00:00'), 0.0, 1.0, 0.1893958656911307, 0.7556776459971668, 0.05492648831170324, 0.6316149354540995], [Timestamp('2018-03-27 00:00:00'), 0.0, 1.0, 0.1517828197876221, 0.5539309875127238, 0.2942861926996503, 0.6311075981712099], [Timestamp('2018-03-28 00:00:00'), 2.0, 2.0, 0.1297256874062739, 0.21425108538110083, 0.6560232272126211, 0.6259427036246241], [Timestamp('2018-03-29 00:00:00'), 0.0, 1.0, 0.28758185047089346, 0.6121596795248712, 0.10025847000423657, 0.6326646221995059], [Timestamp('2018-04-02 00:00:00'), 0.0, 1.0, 0.006383760906261653, 0.9179623128426332, 0.0756539262511075, 0.6306199972866638], [Timestamp('2018-04-03 00:00:00'), 0.0, 1.0, 0.15174956431670594, 0.5546390736681149, 0.2936113620151852, 0.6255110480916932], [Timestamp('2018-04-04 00:00:00'), 2.0, 1.0, 0.022962883329054398, 0.7967343023555492, 0.18030281431539386, 0.6265542561414121], [Timestamp('2018-04-05 00:00:00'), 2.0, 0.0, 0.7328067028098907, 0.11485508782149874, 0.15233820936860593, 0.6303088827330559], [Timestamp('2018-04-06 00:00:00'), 0.0, 0.0, 0.7163516188947928, 0.17302436968158516, 0.11062401142362738, 0.61754097075198], [Timestamp('2018-04-09 00:00:00'), 2.0, 1.0, 0.1861067639134702, 0.468480779576901, 0.34541245650962393, 0.6209572350639095], [Timestamp('2018-04-10 00:00:00'), 2.0, 0.0, 0.6405195444806705, 0.18997978566668075, 0.169500669852644, 0.6198236070204705], [Timestamp('2018-04-11 00:00:00'), 0.0, 0.0, 0.5034676877359014, 0.07791530577508818, 0.4186170064890047, 0.6190356045652207], [Timestamp('2018-04-12 00:00:00'), 0.0, 1.0, 0.09403758286955188, 0.6851474644016238, 0.220814952728829, 0.6224301520076168], [Timestamp('2018-04-13 00:00:00'), 0.0, 0.0, 0.5385737686797997, 0.14265727320661895, 0.3187689581135745, 0.6231225968177087], [Timestamp('2018-04-16 00:00:00'), 2.0, 1.0, 0.2373651897696352, 0.468152334382877, 0.29448247584748877, 0.6211335929645789], [Timestamp('2018-04-17 00:00:00'), 2.0, 0.0, 0.714198792852982, 0.06378338369973001, 0.2220178234472889, 0.6176800282063439], [Timestamp('2018-04-18 00:00:00'), 2.0, 0.0, 0.8149597099094867, 0.04976417994030622, 0.13527611015020194, 0.6286513786513787], [Timestamp('2018-04-19 00:00:00'), 2.0, 0.0, 
0.5013041730736293, 0.46332157660286116, 0.03537425032351656, 0.6214654751240117], [Timestamp('2018-04-20 00:00:00'), 2.0, 0.0, 0.526013040557756, 0.176617223829678, 0.29736973561256225, 0.6230100292600292], [Timestamp('2018-04-23 00:00:00'), 0.0, 0.0, 0.49994198907793036, 0.43569300727159355, 0.06436500365048059, 0.6233904919026027], [Timestamp('2018-04-24 00:00:00'), 0.0, 1.0, 0.03684947591206701, 0.5144937342861756, 0.44865678980176, 0.6221445926746406], [Timestamp('2018-04-25 00:00:00'), 2.0, 2.0, 0.1109638638364336, 0.07697721711851696, 0.8120589190450435, 0.6224638611100549], [Timestamp('2018-04-26 00:00:00'), 2.0, 0.0, 0.8846430216808118, 0.06658434295480158, 0.04877263536438381, 0.6213496273841101], [Timestamp('2018-04-27 00:00:00'), 2.0, 0.0, 0.5876826044695859, 0.215242400685706, 0.19707499484470983, 0.6190873487909571], [Timestamp('2018-04-30 00:00:00'), 0.0, 1.0, 0.16263326225041963, 0.5458227143230223, 0.2915440234265602, 0.6171769155502033], [Timestamp('2018-05-02 00:00:00'), 0.0, 2.0, 0.13035286096349477, 0.3114956127662432, 0.5581515262702563, 0.6175049759981267], [Timestamp('2018-05-03 00:00:00'), 0.0, 1.0, 0.14153144914951954, 0.623115010374704, 0.2353535404757833, 0.6166986829794442], [Timestamp('2018-05-04 00:00:00'), 2.0, 2.0, 0.1931338836879737, 0.027510430994353432, 0.7793556853176689, 0.6144310650828061], [Timestamp('2018-05-07 00:00:00'), 2.0, 0.0, 0.5343576612268418, 0.15840100067057802, 0.307241338102576, 0.61480508911385], [Timestamp('2018-05-08 00:00:00'), 2.0, 2.0, 0.24554997629554112, 0.2416914228108337, 0.5127586008936238, 0.615511210696862], [Timestamp('2018-05-09 00:00:00'), 2.0, 1.0, 0.13254303950505658, 0.7711654829433819, 0.09629147755155522, 0.6158840151858893], [Timestamp('2018-05-10 00:00:00'), 0.0, 0.0, 0.8157433068395611, 0.1254039238127651, 0.058852769347677636, 0.6133174431632581], [Timestamp('2018-05-11 00:00:00'), 2.0, 2.0, 0.0637373586537416, 0.29600169761801876, 0.640260943728246, 0.6183860986492565], [Timestamp('2018-05-14 00:00:00'), 2.0, 0.0, 0.4927610632385842, 0.3356804109780451, 0.1715585257833638, 0.6161695679239538], [Timestamp('2018-05-15 00:00:00'), 2.0, 2.0, 0.4156879825183443, 0.09774905401792876, 0.4865629634637323, 0.6136146811703153], [Timestamp('2018-05-16 00:00:00'), 0.0, 1.0, 0.4261635356049496, 0.498146992176681, 0.07568947221836703, 0.6083387118856226], [Timestamp('2018-05-17 00:00:00'), 0.0, 2.0, 0.09806606605443852, 0.05529854754498554, 0.8466353864005808, 0.6090218243246533], [Timestamp('2018-05-18 00:00:00'), 0.0, 2.0, 0.0961002507903564, 0.16216271199861068, 0.7417370372110318, 0.6154960981047938], [Timestamp('2018-05-21 00:00:00'), 0.0, 2.0, 0.1278881179984068, 0.09903081014202927, 0.7730810718595623, 0.6161423117944856], [Timestamp('2018-05-22 00:00:00'), 0.0, 2.0, 0.16136619072224737, 0.09276931157638547, 0.7458644977013698, 0.6171049090839196], [Timestamp('2018-05-23 00:00:00'), 0.0, 2.0, 0.46699822900171656, 0.035618039453098, 0.49738373154518095, 0.6123272137453888], [Timestamp('2018-05-24 00:00:00'), 0.0, 2.0, 0.14698539904767738, 0.20169565106027706, 0.6513189498920465, 0.6158181592964201], [Timestamp('2018-05-25 00:00:00'), 0.0, 2.0, 0.3193530005873428, 0.14566214638489838, 0.5349848530277619, 0.6107687563746768], [Timestamp('2018-05-28 00:00:00'), 2.0, 2.0, 0.11112023889351123, 0.11443256426415634, 0.7744471968423308, 0.606711637662264], [Timestamp('2018-05-29 00:00:00'), 0.0, 0.0, 0.8981923178701245, 0.06345515645589644, 0.03835252567398569, 0.6081906417499637], [Timestamp('2018-05-30 00:00:00'), 0.0, 0.0, 
0.5615317544009988, 0.05789996808202955, 0.38056827751697775, 0.6074099318403116], [Timestamp('2018-06-01 00:00:00'), 2.0, 0.0, 0.46978042644548534, 0.06921961576909093, 0.4609999577854244, 0.6108374631904043], [Timestamp('2018-06-04 00:00:00'), 0.0, 0.0, 0.7175237553437637, 0.10082114569406189, 0.18165509896217738, 0.6022809839499306], [Timestamp('2018-06-05 00:00:00'), 0.0, 2.0, 0.16524765266084354, 0.16233431986352076, 0.6724180274756368, 0.6017888731229201], [Timestamp('2018-06-06 00:00:00'), 0.0, 0.0, 0.46455235406192796, 0.43815450297528724, 0.09729314296278856, 0.6074204219553057], [Timestamp('2018-06-07 00:00:00'), 0.0, 0.0, 0.4974389646616372, 0.031815253771030984, 0.4707457815673312, 0.6069238828333684], [Timestamp('2018-06-08 00:00:00'), 2.0, 0.0, 0.6192618269745834, 0.03444552875650851, 0.3462926442689121, 0.6050448846431934], [Timestamp('2018-06-11 00:00:00'), 2.0, 2.0, 0.3006893539429125, 0.17578386089000642, 0.5235267851670807, 0.6107254174683434], [Timestamp('2018-06-12 00:00:00'), 0.0, 0.0, 0.6321790463438332, 0.26529666405524405, 0.10252428960092255, 0.614400950584569], [Timestamp('2018-06-13 00:00:00'), 0.0, 0.0, 0.9486391147672645, 0.022691655966066378, 0.02866922926666689, 0.605025246114355], [Timestamp('2018-06-14 00:00:00'), 0.0, 0.0, 0.6473976663252088, 0.0425274281874475, 0.3100749054873508, 0.6016883844752696], [Timestamp('2018-06-15 00:00:00'), 0.0, 0.0, 0.687894621811983, 0.06560106201875417, 0.24650431616926424, 0.5973637081133545], [Timestamp('2018-06-18 00:00:00'), 2.0, 2.0, 0.23704994989915323, 0.26402949290326516, 0.49892055719758516, 0.5977116473373417], [Timestamp('2018-06-19 00:00:00'), 2.0, 0.0, 0.4028044087674332, 0.38162664477523844, 0.21556894645733105, 0.5994357767464314], [Timestamp('2018-06-20 00:00:00'), 0.0, 0.0, 0.9217638808026295, 0.0057495525835196785, 0.07248656661385634, 0.5998024721735238], [Timestamp('2018-06-21 00:00:00'), 1.0, 0.0, 0.41287643175434324, 0.2190348254388168, 0.3680887428068429, 0.5982146412689985], [Timestamp('2018-06-22 00:00:00'), 2.0, 2.0, 0.08679147884230158, 0.037167551082971755, 0.8760409700747211, 0.6010772010804569], [Timestamp('2018-06-25 00:00:00'), 2.0, 0.0, 0.5935954722799697, 0.16612261566416223, 0.24028191205586122, 0.5990062190774849], [Timestamp('2018-06-26 00:00:00'), 2.0, 0.0, 0.7172882710030684, 0.23237885996642912, 0.05033286903050412, 0.6028963214236335], [Timestamp('2018-06-27 00:00:00'), 2.0, 0.0, 0.6614934104139911, 0.03584190492931504, 0.3026646846567004, 0.5945348097781067], [Timestamp('2018-06-28 00:00:00'), 2.0, 0.0, 0.6296694718735291, 0.04741332029168508, 0.32291720783477934, 0.5948955601388569], [Timestamp('2018-06-29 00:00:00'), 2.0, 2.0, 0.20677019167217717, 0.1363059844283233, 0.6569238238995003, 0.5878894060846203], [Timestamp('2018-07-02 00:00:00'), 2.0, 1.0, 0.3647782458540916, 0.41821024833021003, 0.21701150581569312, 0.5876935489007895], [Timestamp('2018-07-03 00:00:00'), 2.0, 1.0, 0.08688635670586052, 0.4811605085858246, 0.43195313470831137, 0.5880439487688743], [Timestamp('2018-07-04 00:00:00'), 2.0, 0.0, 0.3817690275064667, 0.37335792179776534, 0.2448730506957631, 0.5873271469338616], [Timestamp('2018-07-05 00:00:00'), 2.0, 0.0, 0.7641452721013531, 0.004491362996427805, 0.2313633649022173, 0.5887380529661632], [Timestamp('2018-07-06 00:00:00'), 0.0, 2.0, 0.3080618820346652, 0.05780242502453867, 0.6341356929407908, 0.5925477295884446], [Timestamp('2018-07-10 00:00:00'), 0.0, 2.0, 0.35227133373862596, 0.1768277081341828, 0.47090095812719474, 0.5866954403320807], 
[Timestamp('2018-07-11 00:00:00'), 2.0, 2.0, 0.07646722279743091, 0.002759896874737313, 0.9207728803278262, 0.5862364101509362], [Timestamp('2018-07-12 00:00:00'), 2.0, 2.0, 0.4990597492406375, 0.001020912344507746, 0.49991933841485164, 0.5862941456135825], [Timestamp('2018-07-13 00:00:00'), 0.0, 0.0, 0.6598416727906619, 0.02407115319199041, 0.31608717401734066, 0.5842400214113602], [Timestamp('2018-07-16 00:00:00'), 2.0, 2.0, 0.2484632801475001, 0.24722385423513987, 0.5043128656173623, 0.5882814911810804], [Timestamp('2018-07-17 00:00:00'), 0.0, 0.0, 0.5337237793025003, 0.0402451502149699, 0.4260310704825253, 0.5904234800838574], [Timestamp('2018-07-18 00:00:00'), 2.0, 2.0, 0.2605742327352364, 0.014721941296268066, 0.7247038259684947, 0.5920719637174572], [Timestamp('2018-07-19 00:00:00'), 2.0, 2.0, 0.26164232352317857, 0.0035450028380282035, 0.734812673638795, 0.5913424828318279], [Timestamp('2018-07-20 00:00:00'), 1.0, 0.0, 0.5066113543721236, 0.002714237899546527, 0.49067440772833226, 0.5903312066200606], [Timestamp('2018-07-23 00:00:00'), 2.0, 0.0, 0.5403159397282402, 0.2590805474395456, 0.2006035128322205, 0.5970460749594215], [Timestamp('2018-07-24 00:00:00'), 2.0, 2.0, 0.3939702123022736, 0.0051197224072047146, 0.6009100652905167, 0.5963256699749979], [Timestamp('2018-07-25 00:00:00'), 0.0, 0.0, 0.6607742064980924, 0.04915201944555101, 0.2900737740563618, 0.5953169149947255], [Timestamp('2018-07-26 00:00:00'), 2.0, 2.0, 0.1343785228408865, 0.004153626311657733, 0.8614678508474557, 0.5887201901694655], [Timestamp('2018-07-27 00:00:00'), 2.0, 0.0, 0.6125406384769113, 0.0019377083523037537, 0.3855216531707897, 0.5882994523345401], [Timestamp('2018-07-30 00:00:00'), 0.0, 0.0, 0.8436747141743397, 0.003781512458299268, 0.15254377336736571, 0.5873015873015873], [Timestamp('2018-07-31 00:00:00'), 2.0, 2.0, 0.060498244531193145, 0.038839050840879116, 0.9006627046279245, 0.5892255892255892], [Timestamp('2018-08-01 00:00:00'), 2.0, 2.0, 0.4666890162927988, 0.015381712244612282, 0.5179292714625955, 0.5929172813230784], [Timestamp('2018-08-02 00:00:00'), 2.0, 2.0, 0.4787503562068006, 0.004342572874195747, 0.5169070709190027, 0.5908918771904458], [Timestamp('2018-08-03 00:00:00'), 0.0, 0.0, 0.7390331071191669, 0.009274866115823573, 0.25169202676500824, 0.5945639526945204], [Timestamp('2018-08-06 00:00:00'), 0.0, 2.0, 0.31323751912822495, 0.1692613822697488, 0.5175010986020282, 0.5938429459824148], [Timestamp('2018-08-07 00:00:00'), 0.0, 2.0, 0.024406879906080453, 0.017845553254531354, 0.9577475668393908, 0.5908010366905851], [Timestamp('2018-08-08 00:00:00'), 2.0, 2.0, 0.19579081450076133, 0.003665221524793048, 0.8005439639744487, 0.5906618105108167], [Timestamp('2018-08-09 00:00:00'), 0.0, 0.0, 0.6550494089922336, 0.05346605442390238, 0.2914845365838645, 0.5909788090936465], [Timestamp('2018-08-10 00:00:00'), 2.0, 2.0, 0.40845662747634437, 0.0202945866651818, 0.5712487858584698, 0.5928635367201056], [Timestamp('2018-08-13 00:00:00'), 1.0, 0.0, 0.6668956769290166, 0.0042398321421693265, 0.32886449092881787, 0.5941917794858159], [Timestamp('2018-08-14 00:00:00'), 0.0, 2.0, 0.3456771008195589, 0.07409980547696506, 0.5802230937034768, 0.5951119057833189], [Timestamp('2018-08-15 00:00:00'), 0.0, 2.0, 0.07461005438282932, 0.00585973004389297, 0.9195302155732806, 0.5956699246654714], [Timestamp('2018-08-16 00:00:00'), 0.0, 0.0, 0.5279087196459353, 0.14367991256860332, 0.3284113677854559, 0.592362630660503], [Timestamp('2018-08-17 00:00:00'), 0.0, 2.0, 0.43816986476457226, 0.023966631415218174, 
0.5378635038202043, 0.5942089825068547], [Timestamp('2018-08-20 00:00:00'), 0.0, 2.0, 0.12600115386536284, 0.0018571079997929173, 0.872141738134849, 0.5934869076468489], [Timestamp('2018-08-21 00:00:00'), 2.0, 2.0, 0.35607753792962704, 0.02058613147419083, 0.6233363305961852, 0.5917571735827379], [Timestamp('2018-08-22 00:00:00'), 0.0, 0.0, 0.6477720637238975, 0.10904330540842297, 0.24318463086768305, 0.5908011411828206], [Timestamp('2018-08-23 00:00:00'), 2.0, 0.0, 0.6878795487983553, 0.029257245270534776, 0.2828632059311097, 0.5926297192456889], [Timestamp('2018-08-24 00:00:00'), 2.0, 0.0, 0.9214469915506662, 0.0319984859002774, 0.0465545225490545, 0.594211467830987], [Timestamp('2018-08-27 00:00:00'), 0.0, 0.0, 0.8051827221731107, 0.004346597699502228, 0.1904706801273927, 0.5935198797263023], [Timestamp('2018-08-28 00:00:00'), 2.0, 2.0, 0.2706558910902188, 0.032462209633832916, 0.6968818992759425, 0.5943257880004867], [Timestamp('2018-08-29 00:00:00'), 0.0, 0.0, 0.8852546694702498, 0.006480821354667211, 0.10826450917508285, 0.5921171171171172], [Timestamp('2018-08-30 00:00:00'), 2.0, 0.0, 0.6643144541812208, 0.004689306319421479, 0.33099623949936197, 0.5916640482678218], [Timestamp('2018-08-31 00:00:00'), 0.0, 2.0, 0.39283168427112364, 0.0937771664327983, 0.5133911492960819, 0.5919817312202353], [Timestamp('2018-09-03 00:00:00'), 0.0, 2.0, 0.10652561453706504, 0.14134099279132126, 0.7521333926716108, 0.5927806724213909], [Timestamp('2018-09-04 00:00:00'), 2.0, 2.0, 0.3977449551678564, 0.06917032646598778, 0.5330847183661622, 0.5945641573509434], [Timestamp('2018-09-05 00:00:00'), 2.0, 0.0, 0.7515258290834776, 0.01893040700611718, 0.22954376391040224, 0.5951363373801744], [Timestamp('2018-09-06 00:00:00'), 2.0, 0.0, 0.7710677942040691, 0.007281846010793805, 0.22165035978513692, 0.5954561668044814], [Timestamp('2018-09-10 00:00:00'), 0.0, 0.0, 0.6678154863368092, 0.06005128821374706, 0.2721332254494416, 0.5942663389134232], [Timestamp('2018-09-11 00:00:00'), 2.0, 2.0, 0.08988007279136928, 0.012869638337815124, 0.8972502888708208, 0.594797391515581], [Timestamp('2018-09-12 00:00:00'), 0.0, 0.0, 0.5666050050516753, 0.003049185609547606, 0.4303458093387744, 0.5928805408139507], [Timestamp('2018-09-13 00:00:00'), 2.0, 2.0, 0.2151628654304471, 0.007202219768708127, 0.7776349148008406, 0.5949043902432521], [Timestamp('2018-09-14 00:00:00'), 2.0, 0.0, 0.7159196804785857, 0.023285902461495263, 0.2607944170599182, 0.6012820909027772], [Timestamp('2018-09-17 00:00:00'), 2.0, 0.0, 0.5827545449392353, 0.0007233928580261002, 0.4165220622027454, 0.5959190638066649], [Timestamp('2018-09-18 00:00:00'), 0.0, 0.0, 0.7388580538933445, 0.03130309605210646, 0.2298388500545484, 0.5962381943914501], [Timestamp('2018-09-19 00:00:00'), 0.0, 2.0, 0.38985034313619055, 0.08491620197502259, 0.5252334548887843, 0.5992397089171283], [Timestamp('2018-09-20 00:00:00'), 2.0, 2.0, 0.16982817475754938, 0.0026834241662461603, 0.8274884010762009, 0.6009890597439699], [Timestamp('2018-09-21 00:00:00'), 0.0, 2.0, 0.2877660569693335, 0.013499577926896707, 0.6987343651037756, 0.6037663486682714], [Timestamp('2018-09-24 00:00:00'), 2.0, 2.0, 0.13182704985744828, 0.0049093347911151955, 0.8632636153514424, 0.6030428120211403], [Timestamp('2018-09-25 00:00:00'), 2.0, 0.0, 0.787854137749173, 0.01932738945906642, 0.1928184727917621, 0.6048373895012551], [Timestamp('2018-09-26 00:00:00'), 2.0, 0.0, 0.9481343396186804, 0.002932313196746881, 0.04893334718457863, 0.6041848028852134], [Timestamp('2018-09-27 00:00:00'), 0.0, 0.0, 
0.6905699532589712, 0.031691451332458, 0.27773859540857765, 0.5983747319744762], [Timestamp('2018-09-28 00:00:00'), 0.0, 2.0, 0.2341500530589924, 0.0015933875671410537, 0.7642565593738718, 0.6022991501252372], [Timestamp('2018-10-01 00:00:00'), 2.0, 2.0, 0.3637479652378837, 0.018190875174891596, 0.6180611595872275, 0.599403428124469], [Timestamp('2018-10-02 00:00:00'), 2.0, 0.0, 0.6051661333438674, 0.013407723779702644, 0.3814261428764328, 0.5977921913536445], [Timestamp('2018-10-03 00:00:00'), 2.0, 0.0, 0.5293632106419034, 0.003097164443322914, 0.4675396249147777, 0.5988161554117926], [Timestamp('2018-10-04 00:00:00'), 0.0, 0.0, 0.8104827863954088, 0.021854204123156994, 0.1676630094814288, 0.5952197535384044], [Timestamp('2018-10-05 00:00:00'), 2.0, 2.0, 0.27147759592191695, 0.01650115897345344, 0.7120212451046359, 0.6008533611981888], [Timestamp('2018-10-08 00:00:00'), 2.0, 0.0, 0.5441723612007168, 0.143030249035678, 0.31279738976359944, 0.5999513762264478], [Timestamp('2018-10-09 00:00:00'), 0.0, 0.0, 0.6531397163341175, 0.005317121398549412, 0.34154316226733306, 0.5978354978354977], [Timestamp('2018-10-10 00:00:00'), 0.0, 2.0, 0.05251586678196599, 0.011787334976299203, 0.9356967982417363, 0.5947085764477068], [Timestamp('2018-10-11 00:00:00'), 2.0, 2.0, 0.09765260170977574, 0.001216967193832425, 0.9011304310963884, 0.5906518511572663], [Timestamp('2018-10-15 00:00:00'), 2.0, 2.0, 0.24636250895528186, 0.006394155485799466, 0.747243335558912, 0.5912148732846687], [Timestamp('2018-10-16 00:00:00'), 0.0, 2.0, 0.3273555612663984, 0.23809935701737875, 0.434545081716222, 0.590065364839733], [Timestamp('2018-10-17 00:00:00'), 0.0, 2.0, 0.10599658713085125, 0.012058457064215752, 0.8819449558049278, 0.5954463701765861], [Timestamp('2018-10-18 00:00:00'), 2.0, 2.0, 0.40082630689973625, 0.043256145224703564, 0.5559175478755637, 0.5995506136635168], [Timestamp('2018-10-19 00:00:00'), 2.0, 2.0, 0.19412268473036765, 0.0023853810759499267, 0.8034919341936806, 0.6008007209688649], [Timestamp('2018-10-22 00:00:00'), 0.0, 0.0, 0.6467665284385827, 0.0032818451224834078, 0.3499516264389298, 0.5977710400892468], [Timestamp('2018-10-23 00:00:00'), 0.0, 2.0, 0.20201620956900726, 0.007943664480853964, 0.790040125950141, 0.598283072859344], [Timestamp('2018-10-24 00:00:00'), 2.0, 2.0, 0.08514222346073654, 0.036519610420282046, 0.8783381661189801, 0.5976052216476849], [Timestamp('2018-10-25 00:00:00'), 2.0, 0.0, 0.5636830050803385, 0.036195191314917975, 0.40012180360474, 0.5997855351672202], [Timestamp('2018-10-26 00:00:00'), 0.0, 2.0, 0.192507216916245, 0.0029017176380247405, 0.8045910654457232, 0.6015170902111603], [Timestamp('2018-10-29 00:00:00'), 2.0, 2.0, 0.29289783267298763, 0.28921129816558744, 0.4178908691614181, 0.5912289280565637], [Timestamp('2018-10-30 00:00:00'), 0.0, 0.0, 0.5611546353982774, 0.0009352705031159214, 0.4379100940986056, 0.5866084409263383], [Timestamp('2018-10-31 00:00:00'), 0.0, 2.0, 0.11403589670591831, 0.081350418793654, 0.8046136845004269, 0.5899274840035597], [Timestamp('2018-11-01 00:00:00'), 2.0, 2.0, 0.2185468948027404, 0.0019928861327098897, 0.7794602190645531, 0.589508052474084], [Timestamp('2018-11-05 00:00:00'), 0.0, 0.0, 0.6172207478372925, 0.018359755353522526, 0.36441949680919017, 0.5879457888349225], [Timestamp('2018-11-06 00:00:00'), 0.0, 2.0, 0.08753983613545778, 0.003171156069167855, 0.9092890077953723, 0.5903186525650507], [Timestamp('2018-11-07 00:00:00'), 0.0, 2.0, 0.3175022849763575, 0.08808377935604109, 0.5944139356676059, 0.5884947454779857], 
[Timestamp('2018-11-08 00:00:00'), 2.0, 0.0, 0.7014448344034486, 0.008564428500987481, 0.28999073709556406, 0.5876146472412994], [Timestamp('2018-11-09 00:00:00'), 0.0, 2.0, 0.10018900142735798, 0.002274885581112001, 0.8975361129915269, 0.5839633609590807], [Timestamp('2018-11-12 00:00:00'), 0.0, 0.0, 0.5328764876609103, 0.06837803092185421, 0.3987454814172331, 0.5854120944824287], [Timestamp('2018-11-13 00:00:00'), 2.0, 0.0, 0.625591226591273, 0.011712392379093056, 0.3626963810296301, 0.5873070226705372], [Timestamp('2018-11-14 00:00:00'), 2.0, 0.0, 0.7877684845850929, 0.007564354145621857, 0.2046671612692838, 0.5846035324409026], [Timestamp('2018-11-16 00:00:00'), 2.0, 0.0, 0.876990713564472, 0.003782487801700339, 0.11922679863383222, 0.5862921986013361], [Timestamp('2018-11-19 00:00:00'), 0.0, 0.0, 0.6955124627026807, 0.054601859085689555, 0.24988567821163607, 0.5805376790713922], [Timestamp('2018-11-21 00:00:00'), 2.0, 0.0, 0.6129654769566332, 0.016711999190539957, 0.3703225238528327, 0.5819596229007545], [Timestamp('2018-11-22 00:00:00'), 0.0, 0.0, 0.5688386391333993, 0.02964176392950125, 0.40151959693709754, 0.5871726671099712], [Timestamp('2018-11-23 00:00:00'), 0.0, 2.0, 0.433779073553594, 0.05428881607880113, 0.5119321103676098, 0.5906563289881003], [Timestamp('2018-11-26 00:00:00'), 2.0, 2.0, 0.22535595422449545, 0.06296709751947764, 0.7116769482560286, 0.5855365611280928], [Timestamp('2018-11-27 00:00:00'), 0.0, 0.0, 0.8453505788410368, 0.005299273575398554, 0.14935014758356574, 0.580240307637568], [Timestamp('2018-11-28 00:00:00'), 0.0, 2.0, 0.35629737462568795, 0.0280903320883511, 0.615612293285961, 0.5826850055518997], [Timestamp('2018-11-29 00:00:00'), 2.0, 2.0, 0.3257871500686894, 0.06011633731259567, 0.614096512618714, 0.58156684585256], [Timestamp('2018-11-30 00:00:00'), 2.0, 0.0, 0.6170835516408545, 0.010327570969267204, 0.3725888773898846, 0.5802955470274452], [Timestamp('2018-12-03 00:00:00'), 0.0, 0.0, 0.7449663916427699, 0.02433463354877405, 0.23069897480845789, 0.5805924754402506], [Timestamp('2018-12-04 00:00:00'), 2.0, 0.0, 0.6057038360402061, 0.05164579526117745, 0.34265036869861853, 0.5822220538252769], [Timestamp('2018-12-05 00:00:00'), 0.0, 0.0, 0.8042973221714745, 0.03333153739399428, 0.16237114043453724, 0.5816090988861505], [Timestamp('2018-12-06 00:00:00'), 2.0, 2.0, 0.32971072245326194, 0.0069588483263106505, 0.6633304292204324, 0.5809754143541609], [Timestamp('2018-12-07 00:00:00'), 0.0, 2.0, 0.34825489517799235, 0.023354657700472483, 0.628390447121542, 0.5839889790976747], [Timestamp('2018-12-10 00:00:00'), 0.0, 0.0, 0.4729051734569341, 0.0579177313569482, 0.4691770951861242, 0.5804256331430244], [Timestamp('2018-12-11 00:00:00'), 1.0, 2.0, 0.07379158091477463, 0.26727619121331, 0.6589322278719146, 0.5784306443483566], [Timestamp('2018-12-12 00:00:00'), 2.0, 2.0, 0.24033896370763677, 0.13648583580891452, 0.623175200483444, 0.5825795156113219], [Timestamp('2018-12-13 00:00:00'), 0.0, 0.0, 0.5208712691551133, 0.04215254145351025, 0.43697618939137245, 0.5817555155417325], [Timestamp('2018-12-14 00:00:00'), 0.0, 0.0, 0.4966632170640469, 0.19892348040406796, 0.30441330253188764, 0.5842525370627173], [Timestamp('2018-12-17 00:00:00'), 0.0, 2.0, 0.25474103064277115, 0.02743878469198138, 0.7178201846652457, 0.588870176797006], [Timestamp('2018-12-18 00:00:00'), 2.0, 2.0, 0.27022671981250745, 0.048830316049228766, 0.6809429641382672, 0.5871241450509744], [Timestamp('2018-12-19 00:00:00'), 0.0, 0.0, 0.6837594561216551, 0.0074232354904450685, 0.30881730838790367, 
0.5905353027446051], [Timestamp('2018-12-20 00:00:00'), 2.0, 2.0, 0.30042444078282077, 0.04701602029304678, 0.6525595389241384, 0.585949525684625], [Timestamp('2018-12-21 00:00:00'), 2.0, 2.0, 0.39530574274266883, 0.048791141354199936, 0.5559031159031323, 0.5851347411387396], [Timestamp('2018-12-26 00:00:00'), 1.0, 0.0, 0.8190658425970575, 0.03266033551274758, 0.1482738218901917, 0.5881103278027012], [Timestamp('2018-12-27 00:00:00'), 2.0, 2.0, 0.4054204336729057, 0.030335707413430713, 0.5642438589136566, 0.5915711892420648], [Timestamp('2018-12-28 00:00:00'), 2.0, 0.0, 0.7529118006800044, 0.0011131007456831397, 0.24597509857431493, 0.5885409998906682], [Timestamp('2019-01-02 00:00:00'), 2.0, 0.0, 0.6223483316355973, 0.16173355935866127, 0.21591810900573638, 0.5903492928588779], [Timestamp('2019-01-03 00:00:00'), 2.0, 2.0, 0.460880603527064, 0.0014719278912008848, 0.5376474685817334, 0.5904172572712133], [Timestamp('2019-01-04 00:00:00'), 2.0, 0.0, 0.6809798042672227, 0.019784400753371775, 0.299235794979406, 0.5891580084493325], [Timestamp('2019-01-07 00:00:00'), 0.0, 0.0, 0.6459772154354216, 0.045382237550999585, 0.3086405470135808, 0.588766191481303], [Timestamp('2019-01-08 00:00:00'), 2.0, 2.0, 0.03592024464207051, 0.025848602067953294, 0.9382311532899724, 0.5816416627004038], [Timestamp('2019-01-09 00:00:00'), 0.0, 0.0, 0.6539923075543242, 0.018366929712896385, 0.3276407627327817, 0.5834677452707677], [Timestamp('2019-01-10 00:00:00'), 0.0, 2.0, 0.13861129091064892, 0.038861421324786205, 0.82252728776457, 0.5832927225471085], [Timestamp('2019-01-11 00:00:00'), 0.0, 2.0, 0.04670813361946886, 0.012675099038797174, 0.9406167673417338, 0.5837780547300248], [Timestamp('2019-01-14 00:00:00'), 1.0, 2.0, 0.1394074541383147, 0.0662766905798157, 0.7943158552818669, 0.595139810310089], [Timestamp('2019-01-15 00:00:00'), 1.0, 2.0, 0.042135914689645726, 0.04430683814305894, 0.9135572471672926, 0.5995739291885337], [Timestamp('2019-01-16 00:00:00'), 2.0, 2.0, 0.29567964914294703, 0.005941345921795511, 0.6983790049352588, 0.5876365612158234], [Timestamp('2019-01-17 00:00:00'), 2.0, 0.0, 0.6902906353694042, 0.06208449630077572, 0.24762486832982625, 0.5848576132250176], [Timestamp('2019-01-18 00:00:00'), 2.0, 0.0, 0.6428234426328756, 0.02280145795903093, 0.3343750994080898, 0.5842604739388073], [Timestamp('2019-01-21 00:00:00'), 0.0, 2.0, 0.2529112017936097, 0.006297076199139726, 0.7407917220072493, 0.5825771446232896], [Timestamp('2019-01-22 00:00:00'), 2.0, 2.0, 0.2570996923232027, 0.03492544224527327, 0.7079748654315177, 0.5876165564878288], [Timestamp('2019-01-23 00:00:00'), 2.0, 0.0, 0.5000539416792048, 0.006404592091372706, 0.4935414662294263, 0.5857267094521701], [Timestamp('2019-01-24 00:00:00'), 0.0, 2.0, 0.3675253790399041, 0.03170413126851904, 0.6007704896915783, 0.5855760354071581], [Timestamp('2019-01-28 00:00:00'), 2.0, 2.0, 0.24434675628191374, 0.013434682591546877, 0.7422185611265376, 0.5817105074860529], [Timestamp('2019-01-29 00:00:00'), 2.0, 0.0, 0.5647313397515301, 0.017295980197911034, 0.41797268005055355, 0.5811189604886638], [Timestamp('2019-01-30 00:00:00'), 1.0, 0.0, 0.7488999538864776, 0.08151525246418077, 0.1695847936493405, 0.5857252916574951], [Timestamp('2019-01-31 00:00:00'), 2.0, 2.0, 0.18929034341595155, 0.007503880013062554, 0.8032057765709798, 0.5685425685425686], [Timestamp('2019-02-01 00:00:00'), 2.0, 0.0, 0.7592860546331356, 0.0008393108060321631, 0.23987463456083308, 0.5707642673963917], [Timestamp('2019-02-04 00:00:00'), 2.0, 0.0, 0.7330592729090568, 
0.039360811892571804, 0.2275799151983698, 0.5697333243844872], [Timestamp('2019-02-05 00:00:00'), 0.0, 2.0, 0.1201251391160021, 0.03141539010719048, 0.8484594707768068, 0.5657064012734117], [Timestamp('2019-02-06 00:00:00'), 0.0, 2.0, 0.19732303078375313, 0.013127238940038576, 0.7895497302762062, 0.5715221477107174], [Timestamp('2019-02-07 00:00:00'), 1.0, 2.0, 0.36148443135740266, 0.004755395732664222, 0.6337601729099319, 0.5642962716624174], [Timestamp('2019-02-08 00:00:00'), 0.0, 2.0, 0.2955102245142202, 0.014224309954267432, 0.6902654655315081, 0.5551182941079195], [Timestamp('2019-02-11 00:00:00'), 2.0, 2.0, 0.17886953886819337, 0.02826206848386897, 0.7928683926479354, 0.553657590234036], [Timestamp('2019-02-12 00:00:00'), 2.0, 0.0, 0.8346158960667278, 0.027601234600571567, 0.13778286933270198, 0.5528640498822969], [Timestamp('2019-02-13 00:00:00'), 2.0, 0.0, 0.8709887548299821, 0.0002917520931504399, 0.12871949307687094, 0.5522861930062394], [Timestamp('2019-02-14 00:00:00'), 0.0, 0.0, 0.6608766267267256, 0.09948345200740746, 0.23963992126586242, 0.5508587770008183], [Timestamp('2019-02-15 00:00:00'), 0.0, 2.0, 0.16315763171714637, 0.05899068536187399, 0.7778516829209831, 0.552617084628321], [Timestamp('2019-02-18 00:00:00'), 2.0, 2.0, 0.1811332078339939, 0.08342502273865572, 0.7354417694273493, 0.552441232038326], [Timestamp('2019-02-19 00:00:00'), 0.0, 2.0, 0.4183582119760018, 0.03677181826036346, 0.5448699697636409, 0.5514487180720505], [Timestamp('2019-02-20 00:00:00'), 2.0, 0.0, 0.5949931477837774, 0.04722310398156752, 0.35778374823465836, 0.5506405652999428], [Timestamp('2019-02-21 00:00:00'), 0.0, 1.0, 0.393514274415307, 0.4106246542468797, 0.1958610713378144, 0.5524102088310071], [Timestamp('2019-02-22 00:00:00'), 0.0, 2.0, 0.3566105499175175, 0.014871860445544888, 0.6285175896369322, 0.5518103368935555], [Timestamp('2019-02-25 00:00:00'), 0.0, 0.0, 0.6811063587935514, 0.036553933694886784, 0.28233970751155874, 0.5514209375139155], [Timestamp('2019-02-26 00:00:00'), 2.0, 2.0, 0.3112076884761463, 0.017486236451860683, 0.6713060750719902, 0.5516734717724633], [Timestamp('2019-02-27 00:00:00'), 1.0, 0.0, 0.5358718246821391, 0.07279044739187275, 0.3913377279259926, 0.5549067055110413], [Timestamp('2019-02-28 00:00:00'), 0.0, 2.0, 0.49620421407258825, 0.003927607100272408, 0.4998681788271428, 0.5709659512269539], [Timestamp('2019-03-01 00:00:00'), 2.0, 0.0, 0.494715797305864, 0.013207929476856157, 0.4920762732172866, 0.5718299686341309], [Timestamp('2019-03-07 00:00:00'), 0.0, 2.0, 0.4110158770750581, 0.04681367821035982, 0.5421704447145781, 0.570222732420626], [Timestamp('2019-03-08 00:00:00'), 2.0, 0.0, 0.5067799667590388, 0.05573413087830053, 0.4374859023626606, 0.5706761935116929], [Timestamp('2019-03-11 00:00:00'), 0.0, 0.0, 0.77259034435946, 0.031000244295268157, 0.19640941134526702, 0.5707585682557873], [Timestamp('2019-03-12 00:00:00'), 2.0, 2.0, 0.10405269030533915, 0.0028838672464272857, 0.8930634424482347, 0.5701707939611166], [Timestamp('2019-03-13 00:00:00'), 2.0, 0.0, 0.6747865854173425, 0.1223085342446317, 0.20290488033802365, 0.5687735299693941], [Timestamp('2019-03-14 00:00:00'), 2.0, 0.0, 0.7101227896810128, 0.10719193853396325, 0.1826852717850294, 0.5719791835521695], [Timestamp('2019-03-15 00:00:00'), 2.0, 2.0, 0.3638730631878835, 0.21509622204577267, 0.4210307147663462, 0.5639653569407389], [Timestamp('2019-03-18 00:00:00'), 2.0, 0.0, 0.8312938782623245, 0.036623702416357234, 0.13208241932131773, 0.5615793010752689], [Timestamp('2019-03-19 00:00:00'), 0.0, 
0.0, 0.6212669325923542, 0.02108044009246171, 0.357652627315179, 0.5585306726463412], [Timestamp('2019-03-20 00:00:00'), 0.0, 2.0, 0.38060381949969446, 0.026543921171625615, 0.5928522593286787, 0.5589817370284802], [Timestamp('2019-03-21 00:00:00'), 0.0, 2.0, 0.11749867136956628, 0.003749909935408557, 0.8787514186950193, 0.5687431891119726], [Timestamp('2019-03-22 00:00:00'), 2.0, 2.0, 0.2827795260858976, 0.027069340121182717, 0.6901511337929173, 0.5607067420831722], [Timestamp('2019-03-25 00:00:00'), 2.0, 2.0, 0.34791931988862235, 0.012506798347814223, 0.639573881763563, 0.5618171501426256], [Timestamp('2019-03-26 00:00:00'), 0.0, 0.0, 0.7669315159245498, 0.0633991303125776, 0.1696693537628694, 0.5618900557480403], [Timestamp('2019-03-27 00:00:00'), 2.0, 2.0, 0.32031609614664663, 0.04509960467510359, 0.6345842991782494, 0.5573629874705143], [Timestamp('2019-03-28 00:00:00'), 1.0, 0.0, 0.7643056044608066, 0.001552244975597704, 0.23414215056359164, 0.5656967429120451], [Timestamp('2019-03-29 00:00:00'), 0.0, 2.0, 0.23087438107238026, 0.04341338143342842, 0.7257122374941923, 0.5633072566515911], [Timestamp('2019-04-01 00:00:00'), 2.0, 2.0, 0.354466869298472, 0.007064644606786974, 0.6384684860947365, 0.5637409381963838], [Timestamp('2019-04-02 00:00:00'), 0.0, 0.0, 0.7151984801853695, 0.01513302144295124, 0.26966849837168283, 0.5617600506489395], [Timestamp('2019-04-03 00:00:00'), 2.0, 2.0, 0.1364221063005061, 0.003472781451108291, 0.8601051122483897, 0.560978565477543], [Timestamp('2019-04-04 00:00:00'), 2.0, 1.0, 0.19601617321729187, 0.734930076901759, 0.06905374988095063, 0.5602236616622062], [Timestamp('2019-04-05 00:00:00'), 2.0, 2.0, 0.37550498205024213, 0.002867115300048045, 0.6216279026497029, 0.5604899377905512], [Timestamp('2019-04-08 00:00:00'), 0.0, 0.0, 0.7539041670038669, 0.09339793264642683, 0.1526979003497067, 0.5605494072202842], [Timestamp('2019-04-09 00:00:00'), 0.0, 2.0, 0.3017381059183334, 0.013639813576795622, 0.6846220805048714, 0.5559110494350645], [Timestamp('2019-04-10 00:00:00'), 0.0, 2.0, 0.13851355190673503, 0.03295446019272285, 0.8285319879005368, 0.5553392088542735], [Timestamp('2019-04-11 00:00:00'), 0.0, 2.0, 0.19535898703147878, 0.004885406396394403, 0.799755606572121, 0.5561763712577229], [Timestamp('2019-04-12 00:00:00'), 2.0, 2.0, 0.08596989006727537, 0.005016680326624627, 0.9090134296060947, 0.5564192343604109], [Timestamp('2019-04-15 00:00:00'), 2.0, 0.0, 0.5842586027359157, 0.02062772341016209, 0.39511367385392626, 0.5493736997404479], [Timestamp('2019-04-16 00:00:00'), 1.0, 0.0, 0.646364146321609, 0.025468195501056293, 0.3281676581773301, 0.5520958716080667], [Timestamp('2019-04-17 00:00:00'), 2.0, 0.0, 0.7127866572012849, 0.016072665365247187, 0.27114067743346953, 0.5574471852673072], [Timestamp('2019-04-18 00:00:00'), 0.0, 0.0, 0.525700857302647, 0.005143996212641594, 0.46915514648470896, 0.5577142317333923], [Timestamp('2019-04-22 00:00:00'), 2.0, 2.0, 0.3202728569175598, 0.0865812154552908, 0.5931459276271509, 0.5599870091247001], [Timestamp('2019-04-23 00:00:00'), 0.0, 0.0, 0.6691261436240767, 0.006397327660604849, 0.3244765287153205, 0.5602507907292797], [Timestamp('2019-04-24 00:00:00'), 2.0, 2.0, 0.17624606713077268, 0.002475606604073228, 0.8212783262651475, 0.5610896254922603], [Timestamp('2019-04-25 00:00:00'), 0.0, 0.0, 0.7404240818412264, 0.00796231211926596, 0.25161360603950705, 0.5629702526814279], [Timestamp('2019-04-26 00:00:00'), 2.0, 2.0, 0.06809322205623967, 0.008417843957411478, 0.9234889339863521, 0.5626033494147901], 
[Timestamp('2019-04-29 00:00:00'), 0.0, 0.0, 0.7913892216530497, 0.038302861535488744, 0.17030791681146312, 0.5636716879923401], [Timestamp('2019-04-30 00:00:00'), 0.0, 2.0, 0.1502176755707116, 0.017214331120941815, 0.8325679933083423, 0.564302102317105], [Timestamp('2019-05-02 00:00:00'), 2.0, 2.0, 0.3584697766872811, 0.01962004277789979, 0.6219101805348192, 0.5663443801523781], [Timestamp('2019-05-03 00:00:00'), 0.0, 0.0, 0.7790257726868389, 0.0016641718055207598, 0.21931005550763738, 0.5675974420068334], [Timestamp('2019-05-06 00:00:00'), 0.0, 2.0, 0.11544490481318707, 0.033819690704905585, 0.8507354044819117, 0.5662286048957735], [Timestamp('2019-05-07 00:00:00'), 2.0, 0.0, 0.5402133969771836, 0.00983297524258024, 0.44995362778023257, 0.5648631246052459], [Timestamp('2019-05-08 00:00:00'), 0.0, 2.0, 0.3653555789496894, 0.03282547605300221, 0.6018189449973046, 0.5641346629384464], [Timestamp('2019-05-09 00:00:00'), 0.0, 2.0, 0.33527105312962874, 0.006012202367051397, 0.6587167445033227, 0.5616062623274162], [Timestamp('2019-05-10 00:00:00'), 0.0, 2.0, 0.4171008489341296, 0.021608701608675028, 0.5612904494571932, 0.5552317336056274], [Timestamp('2019-05-13 00:00:00'), 2.0, 2.0, 0.3918174021138554, 0.1359833363730449, 0.47219926151309305, 0.5636359351432881], [Timestamp('2019-05-14 00:00:00'), 0.0, 2.0, 0.32471250016496855, 0.02362096657194986, 0.6516665332630773, 0.5634599673202615], [Timestamp('2019-05-15 00:00:00'), 0.0, 2.0, 0.16029344805135198, 0.02383902787495688, 0.8158675240736896, 0.5629022075594655], [Timestamp('2019-05-16 00:00:00'), 0.0, 2.0, 0.2684508346696797, 0.1721636382938841, 0.5593855270364314, 0.5605736901706705], [Timestamp('2019-05-17 00:00:00'), 2.0, 2.0, 0.3968448335151579, 0.06852924055452139, 0.5346259259303143, 0.5543624866870352], [Timestamp('2019-05-20 00:00:00'), 2.0, 0.0, 0.6502960457365836, 0.029020248637316327, 0.32068370562609955, 0.5516455941570065], [Timestamp('2019-05-21 00:00:00'), 0.0, 0.0, 0.5872339997464635, 0.049931487177279354, 0.36283451307625086, 0.5507620517521704], [Timestamp('2019-05-22 00:00:00'), 0.0, 0.0, 0.5768716933435756, 0.0834613655104419, 0.33966694114598645, 0.5450113087639452], [Timestamp('2019-05-23 00:00:00'), 2.0, 0.0, 0.6721668300344209, 0.00037668581400969643, 0.3274564841515633, 0.5428809536162706], [Timestamp('2019-05-24 00:00:00'), 2.0, 0.0, 0.776713706844452, 0.06712981691274454, 0.1561564762428015, 0.5449059696342305], [Timestamp('2019-05-27 00:00:00'), 2.0, 0.0, 0.6779451205526404, 0.02049228696521213, 0.30156259248214445, 0.542443873569945], [Timestamp('2019-05-28 00:00:00'), 0.0, 0.0, 0.6464689116260393, 0.043805186744789705, 0.30972590162917557, 0.541126765803512], [Timestamp('2019-05-29 00:00:00'), 0.0, 2.0, 0.4214835862514451, 0.14390983574674251, 0.43460657800180885, 0.5442734729565607], [Timestamp('2019-05-30 00:00:00'), 0.0, 2.0, 0.14097881169143003, 0.06049960465808099, 0.7985215836504835, 0.546616706730222], [Timestamp('2019-05-31 00:00:00'), 2.0, 2.0, 0.42649398432221997, 0.04611452976074711, 0.5273914859170314, 0.5446610965844093], [Timestamp('2019-06-03 00:00:00'), 2.0, 0.0, 0.502201856226857, 0.010278242533853135, 0.4875199012392894, 0.5455481848319339], [Timestamp('2019-06-04 00:00:00'), 0.0, 0.0, 0.5711761913919683, 0.024747203980193123, 0.404076604627839, 0.5456404069977591], [Timestamp('2019-06-05 00:00:00'), 2.0, 2.0, 0.23718809764440205, 0.022165576009273692, 0.7406463263463193, 0.5487384778612748], [Timestamp('2019-06-06 00:00:00'), 2.0, 0.0, 0.510461297410849, 0.03451005395348159, 
0.45502864863567627, 0.5515267360525872], [Timestamp('2019-06-07 00:00:00'), 0.0, 0.0, 0.5513934331480531, 0.049901225794762756, 0.3987053410571815, 0.555084446507123], [Timestamp('2019-06-10 00:00:00'), 2.0, 2.0, 0.12402580088796164, 0.11116745402881542, 0.7648067450832284, 0.5555046948356808], [Timestamp('2019-06-11 00:00:00'), 0.0, 0.0, 0.8884629964354673, 0.049564701775797054, 0.06197230178873212, 0.5584484777517565], [Timestamp('2019-06-12 00:00:00'), 2.0, 2.0, 0.1794268752533894, 0.00759192754119817, 0.8129811972054126, 0.5588609048308503], [Timestamp('2019-06-13 00:00:00'), 0.0, 0.0, 0.8649889491971299, 0.04517476894819544, 0.0898362818546766, 0.5553175839390793], [Timestamp('2019-06-14 00:00:00'), 2.0, 2.0, 0.11204176706460636, 0.027785148310450425, 0.8601730846249435, 0.5585793684508639], [Timestamp('2019-06-17 00:00:00'), 2.0, 1.0, 0.17709629852200864, 0.6287684577799185, 0.19413524369806792, 0.554402195027195], [Timestamp('2019-06-18 00:00:00'), 2.0, 0.0, 0.6416583489373503, 0.0238088611279999, 0.33453278993465485, 0.5532747533474277], [Timestamp('2019-06-19 00:00:00'), 2.0, 0.0, 0.5417903317627579, 0.07678288318191588, 0.38142678505533056, 0.5552505185263307], [Timestamp('2019-06-21 00:00:00'), 1.0, 0.0, 0.7814985960620663, 0.059437267787302316, 0.1590641361506312, 0.5476290684624018], [Timestamp('2019-06-24 00:00:00'), 0.0, 2.0, 0.12938749163019655, 0.2646379306680468, 0.6059745777017517, 0.5508493050159716], [Timestamp('2019-06-25 00:00:00'), 2.0, 2.0, 0.4261344088553747, 0.04007100541709445, 0.53379458572753, 0.5420981533941874], [Timestamp('2019-06-26 00:00:00'), 0.0, 2.0, 0.44267595050094405, 0.09603795654110574, 0.46128609295794937, 0.5507420680473825], [Timestamp('2019-06-27 00:00:00'), 2.0, 2.0, 0.1489795226786523, 0.002678570804954128, 0.8483419065163998, 0.5541441800415456], [Timestamp('2019-06-28 00:00:00'), 0.0, 0.0, 0.6418576821648309, 0.05535935011189726, 0.3027829677232705, 0.5492731738221076], [Timestamp('2019-07-01 00:00:00'), 0.0, 2.0, 0.04792954871224411, 0.06329981547786621, 0.8887706358098847, 0.5498366543346098], [Timestamp('2019-07-02 00:00:00'), 2.0, 0.0, 0.41071992140444025, 0.3800233968776418, 0.20925668171791453, 0.5422871800182419], [Timestamp('2019-07-03 00:00:00'), 2.0, 2.0, 0.45184432936663627, 0.021235425070851802, 0.5269202455625162, 0.5417786785086282], [Timestamp('2019-07-04 00:00:00'), 1.0, 0.0, 0.8548622538544433, 0.006520166294694599, 0.13861757985086015, 0.5429733651566452], [Timestamp('2019-07-05 00:00:00'), 2.0, 0.0, 0.5074915734349665, 0.00791401940396289, 0.4845944071610681, 0.5534254512968048], [Timestamp('2019-07-08 00:00:00'), 2.0, 0.0, 0.578578401724524, 0.01049452947318867, 0.41092706880228175, 0.5506948356368561], [Timestamp('2019-07-10 00:00:00'), 2.0, 0.0, 0.5947672932478423, 0.05865607474174212, 0.34657663201041417, 0.5517049049833412], [Timestamp('2019-07-11 00:00:00'), 2.0, 0.0, 0.5110661806703067, 0.0951932305351024, 0.39374058879459733, 0.5491420824404368], [Timestamp('2019-07-12 00:00:00'), 0.0, 2.0, 0.44347033898992416, 0.0922720333859961, 0.46425762762408285, 0.5501447054817842], [Timestamp('2019-07-15 00:00:00'), 0.0, 2.0, 0.16254454299934745, 0.19844682635173777, 0.6390086306489083, 0.5559799677446736], [Timestamp('2019-07-16 00:00:00'), 0.0, 2.0, 0.08679126438020918, 0.06449480075634095, 0.8487139348634435, 0.5531699678347723], [Timestamp('2019-07-17 00:00:00'), 0.0, 2.0, 0.2885176981181225, 0.031380829861825936, 0.6801014720200554, 0.55246475901072], [Timestamp('2019-07-18 00:00:00'), 0.0, 2.0, 0.2122949166101069, 
[Cell output abridged. The rows continue in the same format — [Timestamp, class index (0.0/1.0/2.0), class index (0.0/1.0/2.0), p(class 0), p(class 1), p(class 2), running score] — with the three probabilities summing to ~1 and the trailing score drifting between roughly 0.49 and 0.65. This list runs through Timestamp('2021-05-25 00:00:00'); a second list of the same shape begins at Timestamp('2018-01-26 00:00:00') and continues below.]
0.6179972792165286, 0.5813051557551491], [Timestamp('2018-08-01 00:00:00'), 2.0, 2.0, 0.1895013105922787, 0.02884220987898917, 0.7816564795287371, 0.5819552497119463], [Timestamp('2018-08-02 00:00:00'), 2.0, 0.0, 0.5459869497780715, 0.04793771907476218, 0.4060753311471716, 0.5814377770225185], [Timestamp('2018-08-03 00:00:00'), 0.0, 0.0, 0.5500602877019408, 0.1715922697786067, 0.2783474425194589, 0.5816940573992807], [Timestamp('2018-08-06 00:00:00'), 0.0, 1.0, 0.042278833962864575, 0.4831213310341165, 0.4745998350030218, 0.5802964254577158], [Timestamp('2018-08-07 00:00:00'), 0.0, 2.0, 0.135097323217523, 0.01846107482754716, 0.8464416019549368, 0.5812779048624317], [Timestamp('2018-08-08 00:00:00'), 0.0, 2.0, 0.26880103402252775, 0.012331346198407754, 0.7188676197790594, 0.5832440011325641], [Timestamp('2018-08-09 00:00:00'), 0.0, 2.0, 0.4820256845685363, 0.006171150846230378, 0.5118031645852333, 0.5841929145393902], [Timestamp('2018-08-10 00:00:00'), 2.0, 2.0, 0.28816771763472554, 0.0242116430705028, 0.6876206392947684, 0.586491965259813], [Timestamp('2018-08-13 00:00:00'), 0.0, 0.0, 0.569240022198776, 0.17627589346516015, 0.2544840843360707, 0.592119832255109], [Timestamp('2018-08-14 00:00:00'), 0.0, 2.0, 0.26569145586577486, 0.14651794670968188, 0.5877905974245411, 0.5899451919071667], [Timestamp('2018-08-15 00:00:00'), 0.0, 2.0, 0.11189787258029088, 0.23975085275609234, 0.6483512746636146, 0.588502250678292], [Timestamp('2018-08-16 00:00:00'), 0.0, 0.0, 0.5790233017687867, 0.02640225881583962, 0.3945744394153713, 0.5867033075027951], [Timestamp('2018-08-17 00:00:00'), 0.0, 0.0, 0.7919589214195386, 0.0029306624307644776, 0.20511041614970169, 0.5875587534608203], [Timestamp('2018-08-20 00:00:00'), 1.0, 0.0, 0.5828196154321293, 0.01824214952696568, 0.3989382350409055, 0.5871167368592262], [Timestamp('2018-08-21 00:00:00'), 2.0, 2.0, 0.17516117057222216, 0.0681013618671587, 0.756737467560624, 0.5814778933240804], [Timestamp('2018-08-22 00:00:00'), 1.0, 0.0, 0.780960222741617, 0.009713709493467997, 0.20932606776491014, 0.5813993714730992], [Timestamp('2018-08-23 00:00:00'), 2.0, 0.0, 0.7876578138676379, 0.0016251548772624363, 0.2107170312550986, 0.5564542997862095], [Timestamp('2018-08-24 00:00:00'), 1.0, 0.0, 0.9078742029192969, 0.007440254700667076, 0.08468554238003005, 0.5547248517881048], [Timestamp('2018-08-27 00:00:00'), 2.0, 0.0, 0.5838414507211498, 0.12768435261583044, 0.2884741966630149, 0.5552927828530239], [Timestamp('2018-08-28 00:00:00'), 2.0, 2.0, 0.31383819475127517, 0.09666368412629056, 0.589498121122439, 0.5568875822853157], [Timestamp('2018-08-29 00:00:00'), 1.0, 0.0, 0.8701329856116365, 0.032082483391216304, 0.09778453099714265, 0.560579200430915], [Timestamp('2018-08-30 00:00:00'), 2.0, 2.0, 0.322727933667324, 0.004457527721017581, 0.6728145386116643, 0.5473524004571271], [Timestamp('2018-08-31 00:00:00'), 0.0, 2.0, 0.2505088676676562, 0.02655836532114066, 0.7229327670112052, 0.5549750624918554], [Timestamp('2018-09-03 00:00:00'), 0.0, 0.0, 0.5005707044815578, 0.014245931957413678, 0.48518336356103414, 0.5553159917761687], [Timestamp('2018-09-04 00:00:00'), 2.0, 0.0, 0.6054018699095192, 0.003796393086695674, 0.39080173700378085, 0.5581850661179364], [Timestamp('2018-09-05 00:00:00'), 2.0, 2.0, 0.2657225502578351, 0.019681329007866847, 0.7145961207343001, 0.5602012626854437], [Timestamp('2018-09-06 00:00:00'), 0.0, 0.0, 0.7212505548694526, 0.0467997417993446, 0.23194970333120232, 0.5560365251279016], [Timestamp('2018-09-10 00:00:00'), 0.0, 0.0, 0.5192216959399814, 
0.013515410418709843, 0.4672628936413119, 0.5546401030271998], [Timestamp('2018-09-11 00:00:00'), 2.0, 2.0, 0.11240620523498629, 0.001924230408490817, 0.8856695643565209, 0.5614419085667333], [Timestamp('2018-09-12 00:00:00'), 0.0, 0.0, 0.5125653564932527, 0.02051883504005581, 0.4669158084666902, 0.5637688864233487], [Timestamp('2018-09-13 00:00:00'), 2.0, 2.0, 0.4844025519806057, 0.02114909732170186, 0.4944483506976976, 0.5607142009960079], [Timestamp('2018-09-14 00:00:00'), 2.0, 0.0, 0.6948552806698421, 0.086656291475924, 0.2184884278542305, 0.559654764861257], [Timestamp('2018-09-17 00:00:00'), 2.0, 0.0, 0.6638463339887943, 0.07281739360752319, 0.26333627240368473, 0.5612185302947068], [Timestamp('2018-09-18 00:00:00'), 0.0, 0.0, 0.5199384209984613, 0.29880599203572406, 0.18125558696580862, 0.5537933252414594], [Timestamp('2018-09-19 00:00:00'), 2.0, 0.0, 0.4783710205870925, 0.13636157875312385, 0.3852674006597815, 0.5575938412255548], [Timestamp('2018-09-20 00:00:00'), 2.0, 2.0, 0.4247117781539446, 0.0015010966155845713, 0.5737871252304725, 0.5546028115222267], [Timestamp('2018-09-21 00:00:00'), 1.0, 2.0, 0.3355998499456954, 0.12385472007446116, 0.540545429979849, 0.554503331329912], [Timestamp('2018-09-24 00:00:00'), 2.0, 2.0, 0.3650419475727208, 0.09927524015225611, 0.5356828122750228, 0.5565304516576298], [Timestamp('2018-09-25 00:00:00'), 2.0, 0.0, 0.692990856869134, 0.18108182129449737, 0.12592732183636726, 0.5504938122100788], [Timestamp('2018-09-26 00:00:00'), 2.0, 0.0, 0.8805647202857021, 0.03347994357108554, 0.08595533614320748, 0.5523973480538205], [Timestamp('2018-09-27 00:00:00'), 0.0, 0.0, 0.5602320830563412, 0.01164137428660281, 0.42812654265705785, 0.5509809495937678], [Timestamp('2018-09-28 00:00:00'), 2.0, 2.0, 0.3624270302446396, 0.0075179709823514915, 0.6300549987730135, 0.5488475035536603], [Timestamp('2018-10-01 00:00:00'), 2.0, 2.0, 0.2657308124002064, 0.005647503128168148, 0.7286216844716205, 0.5477841937924338], [Timestamp('2018-10-02 00:00:00'), 2.0, 0.0, 0.5212552826894848, 0.2409558512745928, 0.23778886603592056, 0.5454174440298507], [Timestamp('2018-10-03 00:00:00'), 2.0, 0.0, 0.494188165819656, 0.015381464718460185, 0.49043036946188523, 0.5450670250024664], [Timestamp('2018-10-04 00:00:00'), 2.0, 0.0, 0.4632199012457164, 0.18046087092714846, 0.35631922782713443, 0.5445861704893611], [Timestamp('2018-10-05 00:00:00'), 2.0, 2.0, 0.2720685810351464, 0.009271496250363085, 0.7186599227144868, 0.5435175027152968], [Timestamp('2018-10-08 00:00:00'), 0.0, 2.0, 0.4137986418365753, 0.0633868009337715, 0.5228145572296543, 0.5428132773631841], [Timestamp('2018-10-09 00:00:00'), 0.0, 2.0, 0.3122102016156049, 0.23729325662853534, 0.4504965417558585, 0.5424798579567515], [Timestamp('2018-10-10 00:00:00'), 0.0, 2.0, 0.05857274979969518, 0.00608055603758607, 0.9353466941627192, 0.5421417648279809], [Timestamp('2018-10-11 00:00:00'), 2.0, 2.0, 0.1463395463467834, 0.019065436806457545, 0.8345950168467536, 0.5368239277345432], [Timestamp('2018-10-15 00:00:00'), 2.0, 2.0, 0.1476637614117367, 0.00882768118623427, 0.8435085574020309, 0.5370652227544238], [Timestamp('2018-10-16 00:00:00'), 0.0, 2.0, 0.20329324282985223, 0.0283504791290046, 0.7683562780411434, 0.5382362684538954], [Timestamp('2018-10-17 00:00:00'), 0.0, 2.0, 0.10134340801794646, 0.06009111525269781, 0.8385654767293594, 0.5401073703249972], [Timestamp('2018-10-18 00:00:00'), 2.0, 2.0, 0.3198207796530018, 0.047836644049903836, 0.6323425762970941, 0.5403409545308412], [Timestamp('2018-10-19 00:00:00'), 2.0, 2.0, 
0.16449035681883867, 0.01646203356456158, 0.8190476096166003, 0.5365117230738696], [Timestamp('2018-10-22 00:00:00'), 0.0, 2.0, 0.1801471506788303, 0.1262488928239194, 0.6936039564972436, 0.5371028001753684], [Timestamp('2018-10-23 00:00:00'), 2.0, 2.0, 0.11779406285491766, 0.014357793384200087, 0.8678481437608887, 0.5358310230020381], [Timestamp('2018-10-24 00:00:00'), 2.0, 2.0, 0.09045688155080511, 0.0018340052665406453, 0.9077091131826585, 0.5360772526111486], [Timestamp('2018-10-25 00:00:00'), 2.0, 2.0, 0.319649898251889, 0.05544832660359548, 0.6249017751445193, 0.5322874262654086], [Timestamp('2018-10-26 00:00:00'), 2.0, 2.0, 0.20276819519852463, 0.1830771670644021, 0.6141546377370666, 0.5340211126563887], [Timestamp('2018-10-29 00:00:00'), 2.0, 2.0, 0.23571310342455934, 0.08924319615663827, 0.6750437004188088, 0.5327883066005431], [Timestamp('2018-10-30 00:00:00'), 0.0, 2.0, 0.3859539553773407, 0.01856801805909275, 0.5954780265635601, 0.528306652453512], [Timestamp('2018-10-31 00:00:00'), 2.0, 2.0, 0.1863348367267755, 0.008308497653299213, 0.8053566656199305, 0.5329088191004973], [Timestamp('2018-11-01 00:00:00'), 0.0, 2.0, 0.10589759664212663, 0.24860558968147273, 0.6454968136764057, 0.5316860398706669], [Timestamp('2018-11-05 00:00:00'), 0.0, 2.0, 0.3317704074317439, 0.017166306189866493, 0.6510632863783925, 0.5306524382558148], [Timestamp('2018-11-06 00:00:00'), 0.0, 2.0, 0.2519104182985219, 0.009885167124863866, 0.7382044145766091, 0.5283756493334955], [Timestamp('2018-11-07 00:00:00'), 0.0, 0.0, 0.5578261507591672, 0.08884161262533698, 0.35333223661548924, 0.5293138645488201], [Timestamp('2018-11-08 00:00:00'), 1.0, 0.0, 0.6339142203826583, 0.00768635808591961, 0.35839942153142046, 0.5358050418381476], [Timestamp('2018-11-09 00:00:00'), 0.0, 2.0, 0.24674132294786666, 0.07280383572926805, 0.6804548413228689, 0.5342872397930746], [Timestamp('2018-11-12 00:00:00'), 0.0, 0.0, 0.5006492290446478, 0.012377113083407876, 0.4869736578719435, 0.5323447052746554], [Timestamp('2018-11-13 00:00:00'), 2.0, 0.0, 0.6182403939443532, 0.009213434407443983, 0.37254617164820447, 0.5350448323234062], [Timestamp('2018-11-14 00:00:00'), 2.0, 0.0, 0.5844635416500161, 0.017565408546825752, 0.39797104980315334, 0.5361938126344001], [Timestamp('2018-11-16 00:00:00'), 0.0, 0.0, 0.779954877806324, 0.01074226518989467, 0.2093028570037791, 0.5321408424368617], [Timestamp('2018-11-19 00:00:00'), 0.0, 0.0, 0.744460566058218, 0.002513338943588597, 0.2530260949981925, 0.5326915790470778], [Timestamp('2018-11-21 00:00:00'), 0.0, 0.0, 0.5021668976940086, 0.02172365396873159, 0.476109448337257, 0.5292908475759949], [Timestamp('2018-11-22 00:00:00'), 0.0, 0.0, 0.733789961048898, 0.03519150779503078, 0.231018531156072, 0.5362318840579711], [Timestamp('2018-11-23 00:00:00'), 2.0, 0.0, 0.7005904685108053, 0.0030219404596062613, 0.29638759102959145, 0.5334137835736301], [Timestamp('2018-11-26 00:00:00'), 2.0, 2.0, 0.28211067496887227, 0.0017400880182462275, 0.7161492370128776, 0.532435801087164], [Timestamp('2018-11-27 00:00:00'), 0.0, 0.0, 0.712872894262831, 0.006324720125054399, 0.2808023856121157, 0.5338985074279191], [Timestamp('2018-11-28 00:00:00'), 1.0, 0.0, 0.5172021894363446, 0.005608899121426259, 0.4771889114422265, 0.5344332524882117], [Timestamp('2018-11-29 00:00:00'), 2.0, 2.0, 0.2903928561857982, 0.07272431016487017, 0.6368828336493286, 0.5471184608918662], [Timestamp('2018-11-30 00:00:00'), 0.0, 0.0, 0.729214826570038, 0.02440660899638025, 0.2463785644335815, 0.5449586068646805], [Timestamp('2018-12-03 
00:00:00'), 0.0, 0.0, 0.7128401888890781, 0.0981431705778306, 0.18901664053308717, 0.5448577418388739], [Timestamp('2018-12-04 00:00:00'), 0.0, 0.0, 0.5096807998553967, 0.10478983548582393, 0.38552936465877713, 0.5453891428050576], [Timestamp('2018-12-05 00:00:00'), 0.0, 0.0, 0.8709170810264397, 0.009610838469726627, 0.11947208050383464, 0.5471200763755767], [Timestamp('2018-12-06 00:00:00'), 0.0, 2.0, 0.2349368209445278, 0.05427672449970559, 0.7107864545557698, 0.5467410438226238], [Timestamp('2018-12-07 00:00:00'), 0.0, 2.0, 0.2563312090535777, 0.17095938527727517, 0.5727094056691502, 0.5436725308910336], [Timestamp('2018-12-10 00:00:00'), 0.0, 2.0, 0.29735112782139195, 0.012888947291834261, 0.6897599248867675, 0.5456798442647499], [Timestamp('2018-12-11 00:00:00'), 1.0, 2.0, 0.20668958218173888, 0.035300740354943266, 0.7580096774633203, 0.5426337503823571], [Timestamp('2018-12-12 00:00:00'), 0.0, 2.0, 0.3934807602677349, 0.20329845016758952, 0.4032207895646793, 0.5460961205807778], [Timestamp('2018-12-13 00:00:00'), 0.0, 0.0, 0.6072297382944685, 0.002037128275304454, 0.39073313343022764, 0.5492949220367928], [Timestamp('2018-12-14 00:00:00'), 0.0, 0.0, 0.4741394709976372, 0.0931699677028847, 0.4326905612994814, 0.5444921724940393], [Timestamp('2018-12-17 00:00:00'), 0.0, 2.0, 0.4024108302578952, 0.009435757070966999, 0.588153412671139, 0.5434930336737406], [Timestamp('2018-12-18 00:00:00'), 0.0, 2.0, 0.41525225651959913, 0.015523330869875578, 0.5692244126105321, 0.5496681956463486], [Timestamp('2018-12-19 00:00:00'), 0.0, 0.0, 0.5065280062360932, 0.05086562215734916, 0.44260637160655986, 0.5490138792025584], [Timestamp('2018-12-20 00:00:00'), 2.0, 2.0, 0.31411346147324964, 0.0044821121212406745, 0.6814044264055032, 0.5495255626410712], [Timestamp('2018-12-21 00:00:00'), 2.0, 0.0, 0.5702042447592967, 0.023376224269827765, 0.40641953097087147, 0.553632934946861], [Timestamp('2018-12-26 00:00:00'), 2.0, 0.0, 0.8453807227220763, 0.00466812841087011, 0.14995114886705294, 0.5506590316764907], [Timestamp('2018-12-27 00:00:00'), 2.0, 2.0, 0.43936014957293223, 0.035853873457571676, 0.5247859769694931, 0.5479587844006782], [Timestamp('2018-12-28 00:00:00'), 2.0, 2.0, 0.43942708409366305, 0.06913713226488058, 0.4914357836414519, 0.5508726287262873], [Timestamp('2019-01-02 00:00:00'), 2.0, 2.0, 0.35558099001713556, 0.0020435898743930685, 0.6423754201084664, 0.5425284799960463], [Timestamp('2019-01-03 00:00:00'), 2.0, 0.0, 0.505146185014876, 0.006520271818782662, 0.4883335431663444, 0.5469166381697921], [Timestamp('2019-01-04 00:00:00'), 2.0, 2.0, 0.22765675987342815, 0.17494168099685534, 0.5974015591297196, 0.5497913709295824], [Timestamp('2019-01-07 00:00:00'), 2.0, 2.0, 0.2654439305220996, 0.03510341767049433, 0.6994526518074119, 0.5508919186440262], [Timestamp('2019-01-08 00:00:00'), 2.0, 2.0, 0.05417058673312593, 0.0030461474831983103, 0.9427832657836771, 0.5502322880371661], [Timestamp('2019-01-09 00:00:00'), 0.0, 1.0, 0.26492413728536096, 0.43556894331815676, 0.29950691939648827, 0.5427778844717581], [Timestamp('2019-01-10 00:00:00'), 0.0, 2.0, 0.07064581730968032, 0.014980392738443727, 0.9143737899518763, 0.5431779360913219], [Timestamp('2019-01-11 00:00:00'), 0.0, 2.0, 0.06855215503084994, 0.022822272919887986, 0.9086255720492565, 0.5382020586527031], [Timestamp('2019-01-14 00:00:00'), 1.0, 2.0, 0.07314445164761142, 0.0156175633918222, 0.9112379849605691, 0.540751371595792], [Timestamp('2019-01-15 00:00:00'), 2.0, 2.0, 0.12392079637676687, 0.04630997764544936, 0.8297692259777892, 
0.537459001451919], [Timestamp('2019-01-16 00:00:00'), 2.0, 1.0, 0.1400234418469514, 0.5085784627954274, 0.3513980953576157, 0.5374089151869755], [Timestamp('2019-01-17 00:00:00'), 2.0, 2.0, 0.35405970832720585, 0.03600113283543454, 0.6099391588373545, 0.5338718001716973], [Timestamp('2019-01-18 00:00:00'), 0.0, 0.0, 0.34240463174095, 0.3315654890331538, 0.3260298792258931, 0.5332326574388863], [Timestamp('2019-01-21 00:00:00'), 0.0, 2.0, 0.15932695755649015, 0.010487775187551143, 0.8301852672559593, 0.533757996144104], [Timestamp('2019-01-22 00:00:00'), 2.0, 2.0, 0.1906285961034653, 0.1985211382112259, 0.6108502656853079, 0.5342797366390122], [Timestamp('2019-01-23 00:00:00'), 0.0, 2.0, 0.2924044212148752, 0.01514782767268592, 0.6924477511124443, 0.5359270191510207], [Timestamp('2019-01-24 00:00:00'), 0.0, 2.0, 0.3262843872564138, 0.009155727651198892, 0.6645598850923936, 0.5375750625378083], [Timestamp('2019-01-28 00:00:00'), 2.0, 2.0, 0.25873287647944476, 0.20650903729064246, 0.5347580862299172, 0.5360785201831479], [Timestamp('2019-01-29 00:00:00'), 2.0, 2.0, 0.32427034400492105, 0.06001962827965317, 0.6157100277154312, 0.5394429245098025], [Timestamp('2019-01-30 00:00:00'), 2.0, 0.0, 0.7481654915758754, 0.03201297436247831, 0.21982153406164479, 0.5374051866260935], [Timestamp('2019-01-31 00:00:00'), 2.0, 2.0, 0.47168192702885864, 0.0035286184610661226, 0.5247894545100719, 0.537633837717847], [Timestamp('2019-02-01 00:00:00'), 2.0, 0.0, 0.5708224337925872, 0.11897436792398368, 0.3102031982834256, 0.5401288869217262], [Timestamp('2019-02-04 00:00:00'), 0.0, 2.0, 0.3738767256525739, 0.07964800245229577, 0.5464752718951339, 0.5403551958883951], [Timestamp('2019-02-05 00:00:00'), 0.0, 2.0, 0.20652971284080882, 0.013838274111246654, 0.7796320130479414, 0.5377363995649983], [Timestamp('2019-02-06 00:00:00'), 0.0, 2.0, 0.23464506630843188, 0.00938095030961342, 0.7559739833819528, 0.5406790593410311], [Timestamp('2019-02-07 00:00:00'), 0.0, 2.0, 0.3783751954363835, 0.06394995030645415, 0.5576748542571551, 0.5442827212780265], [Timestamp('2019-02-08 00:00:00'), 2.0, 2.0, 0.2828326939248233, 0.009366407987785359, 0.7078008980873973, 0.5410934256949949], [Timestamp('2019-02-11 00:00:00'), 2.0, 2.0, 0.19802047490134161, 0.014653530894689322, 0.7873259942039686, 0.5421776497971217], [Timestamp('2019-02-12 00:00:00'), 2.0, 0.0, 0.7790413778579693, 0.14210066839335778, 0.07885795374867859, 0.543792798336841], [Timestamp('2019-02-13 00:00:00'), 2.0, 0.0, 0.6580701509566342, 0.006726933904146346, 0.335202915139213, 0.5437517603529196], [Timestamp('2019-02-14 00:00:00'), 0.0, 0.0, 0.5054046185850963, 0.061379595701653085, 0.43321578571325003, 0.5400486133736521], [Timestamp('2019-02-15 00:00:00'), 2.0, 2.0, 0.2925768800210338, 0.11684045035737291, 0.5905826696215924, 0.5405461704804884], [Timestamp('2019-02-18 00:00:00'), 2.0, 2.0, 0.3083088343661356, 0.08152074677815993, 0.6101704188557046, 0.5418901737194542], [Timestamp('2019-02-19 00:00:00'), 1.0, 1.0, 0.40863532347265685, 0.4456200340816285, 0.14574464244571658, 0.539615461538303], [Timestamp('2019-02-20 00:00:00'), 2.0, 2.0, 0.3597429110174224, 0.08476064263874415, 0.5554964463438322, 0.5418383950992647], [Timestamp('2019-02-21 00:00:00'), 0.0, 0.0, 0.4367735446137859, 0.3527513768983226, 0.21047507848788738, 0.5431800921752734], [Timestamp('2019-02-22 00:00:00'), 0.0, 0.0, 0.6326549039559495, 0.03802263976204652, 0.3293224562820082, 0.542838417762851], [Timestamp('2019-02-25 00:00:00'), 2.0, 0.0, 0.5463408686048792, 0.06161710043964925, 
0.3920420309554775, 0.5458156938079028], [Timestamp('2019-02-26 00:00:00'), 2.0, 0.0, 0.4444624832204536, 0.13392447982545144, 0.4216130369540897, 0.5413182406433545], [Timestamp('2019-02-27 00:00:00'), 0.0, 0.0, 0.7052101500413682, 0.1088475009243424, 0.1859423490342887, 0.5437876020801357], [Timestamp('2019-02-28 00:00:00'), 0.0, 0.0, 0.5474682467475311, 0.19860719887737713, 0.2539245543750933, 0.5467548101305507], [Timestamp('2019-03-01 00:00:00'), 1.0, 0.0, 0.5073771455997823, 0.09967347427785316, 0.39294938012236746, 0.54916754206749], [Timestamp('2019-03-07 00:00:00'), 2.0, 0.0, 0.6742290198647467, 0.11875151845744616, 0.20701946167780183, 0.5572438488055211], [Timestamp('2019-03-08 00:00:00'), 2.0, 0.0, 0.6882906224268368, 0.006485002726556565, 0.30522437484660225, 0.5487737164127372], [Timestamp('2019-03-11 00:00:00'), 2.0, 0.0, 0.5073641683359555, 0.06328168773932277, 0.4293541439247183, 0.5481731345341004], [Timestamp('2019-03-12 00:00:00'), 2.0, 2.0, 0.21443771612001258, 0.020705156576170926, 0.7648571273038142, 0.5497921801994142], [Timestamp('2019-03-13 00:00:00'), 2.0, 0.0, 0.8233849466868132, 0.05449674896694859, 0.12211830434623215, 0.5548556419257276], [Timestamp('2019-03-14 00:00:00'), 2.0, 0.0, 0.7291635434468119, 0.05875193292624616, 0.21208452362694427, 0.5539807405398074], [Timestamp('2019-03-15 00:00:00'), 2.0, 0.0, 0.44882014414534366, 0.1222695013489167, 0.42891035450574116, 0.5542048370235299], [Timestamp('2019-03-18 00:00:00'), 2.0, 0.0, 0.7946938680082687, 0.06896811940230607, 0.1363380125894185, 0.5557907737598783], [Timestamp('2019-03-19 00:00:00'), 0.0, 0.0, 0.7018273816578754, 0.06297329066564851, 0.23519932767648177, 0.5524495063353059], [Timestamp('2019-03-20 00:00:00'), 0.0, 2.0, 0.4319350190192447, 0.11261855845582483, 0.4554464225249238, 0.5551274057041901], [Timestamp('2019-03-21 00:00:00'), 0.0, 2.0, 0.2670235089941663, 0.039763369889182505, 0.6932131211166539, 0.5542351611967601], [Timestamp('2019-03-22 00:00:00'), 2.0, 2.0, 0.25885644045631484, 0.04679698100520561, 0.6943465785384831, 0.5563517513557804], [Timestamp('2019-03-25 00:00:00'), 1.0, 0.0, 0.49771147586032816, 0.03518839465774617, 0.46710012948192947, 0.555749544871221], [Timestamp('2019-03-26 00:00:00'), 0.0, 0.0, 0.6661810203366313, 0.01663996477154414, 0.317179014891829, 0.5574986751457339], [Timestamp('2019-03-27 00:00:00'), 2.0, 2.0, 0.22917115149429604, 0.24168258225491612, 0.5291462662507879, 0.5563437071867806], [Timestamp('2019-03-28 00:00:00'), 1.0, 0.0, 0.7556149092482, 0.02056969812145337, 0.22381539263034228, 0.555477660150215], [Timestamp('2019-03-29 00:00:00'), 2.0, 2.0, 0.27892305685735347, 0.05075672798181624, 0.6703202151608323, 0.5457220855046648], [Timestamp('2019-04-01 00:00:00'), 0.0, 2.0, 0.30951396846894064, 0.18916723630467014, 0.5013187952263836, 0.5379695099882594], [Timestamp('2019-04-02 00:00:00'), 2.0, 2.0, 0.2955526813616646, 0.013080176716149645, 0.6913671419221923, 0.5373702178580227], [Timestamp('2019-04-03 00:00:00'), 2.0, 2.0, 0.3205968874492254, 0.2006688357715038, 0.47873427677927044, 0.5348874564203031], [Timestamp('2019-04-04 00:00:00'), 2.0, 0.0, 0.5727121053070925, 0.06517146787209997, 0.36211642682080847, 0.5356477395312346], [Timestamp('2019-04-05 00:00:00'), 2.0, 2.0, 0.42053029630757216, 0.05904970516212506, 0.5204199985303023, 0.5345097463741532], [Timestamp('2019-04-08 00:00:00'), 0.0, 0.0, 0.48085928977759346, 0.16732045151277647, 0.35182025870963674, 0.5322845431541084], [Timestamp('2019-04-09 00:00:00'), 0.0, 2.0, 0.36326756021089546, 
0.0157893175480928, 0.6209431222410137, 0.5327923956037085], [Timestamp('2019-04-10 00:00:00'), 0.0, 2.0, 0.2693604638838074, 0.03499230436229638, 0.6956472317538975, 0.5359877408965769], [Timestamp('2019-04-11 00:00:00'), 0.0, 2.0, 0.14859015851841798, 0.1245471890798355, 0.7268626524017506, 0.5404958292557901], [Timestamp('2019-04-12 00:00:00'), 2.0, 2.0, 0.3700527230862506, 0.028669560859113798, 0.6012777160546331, 0.5361544655022916], [Timestamp('2019-04-15 00:00:00'), 2.0, 0.0, 0.5092241590687728, 0.007527717609409912, 0.4832481233218163, 0.5406549274019153], [Timestamp('2019-04-16 00:00:00'), 2.0, 0.0, 0.6200866157851752, 0.10143370331713963, 0.2784796808976861, 0.5371260683760684], [Timestamp('2019-04-17 00:00:00'), 2.0, 2.0, 0.21277856417635777, 0.013033236399444982, 0.7741881994241976, 0.5425831642378405], [Timestamp('2019-04-18 00:00:00'), 1.0, 2.0, 0.43118409447950606, 0.11827762100201664, 0.45053828451847494, 0.5383490778227621], [Timestamp('2019-04-22 00:00:00'), 2.0, 2.0, 0.2810475626541219, 0.005996412487310519, 0.7129560248585649, 0.5508424324213798], [Timestamp('2019-04-23 00:00:00'), 2.0, 2.0, 0.30191976829900635, 0.013751258530379697, 0.684328973170611, 0.5454581200875636], [Timestamp('2019-04-24 00:00:00'), 0.0, 2.0, 0.35980425956764456, 0.033908925475815234, 0.6062868149565414, 0.5438082385450805], [Timestamp('2019-04-25 00:00:00'), 0.0, 2.0, 0.4245367884233393, 0.02182153378581461, 0.553641677790851, 0.5488377493253901], [Timestamp('2019-04-26 00:00:00'), 0.0, 2.0, 0.1370852844628757, 0.01377410165661704, 0.8491406138805087, 0.5519648377844825], [Timestamp('2019-04-29 00:00:00'), 0.0, 2.0, 0.3242871547680814, 0.014688571649845343, 0.6610242735820797, 0.5500556947925369], [Timestamp('2019-04-30 00:00:00'), 0.0, 2.0, 0.459443732219269, 0.015390809521645427, 0.52516545825908, 0.5478924103507715], [Timestamp('2019-05-02 00:00:00'), 1.0, 0.0, 0.5626389574314163, 0.005824652942154334, 0.43153638962642804, 0.5475779353747144], [Timestamp('2019-05-03 00:00:00'), 0.0, 2.0, 0.19783291379776488, 0.0023025153282693485, 0.7998645708739635, 0.5408603839203209], [Timestamp('2019-05-06 00:00:00'), 2.0, 2.0, 0.33615694703658694, 0.05387513765218941, 0.6099679153112283, 0.5410819788178278], [Timestamp('2019-05-07 00:00:00'), 2.0, 2.0, 0.1876972442079956, 0.18635550018308922, 0.6259472556089111, 0.5377596479420528], [Timestamp('2019-05-08 00:00:00'), 0.0, 2.0, 0.2444870915817601, 0.07638026742671115, 0.679132640991533, 0.5382448932033123], [Timestamp('2019-05-09 00:00:00'), 0.0, 2.0, 0.20596654292958314, 0.03258563842756589, 0.7614478186428529, 0.5374031916767658], [Timestamp('2019-05-10 00:00:00'), 0.0, 2.0, 0.3407358504809502, 0.10395960633679167, 0.5553045431822542, 0.5397114980201473], [Timestamp('2019-05-13 00:00:00'), 1.0, 2.0, 0.4024660889099857, 0.040756334720923014, 0.5567775763690949, 0.539392143391789], [Timestamp('2019-05-14 00:00:00'), 0.0, 2.0, 0.26333416938190246, 0.037595906333795406, 0.6990699242843, 0.5301920228172609], [Timestamp('2019-05-15 00:00:00'), 0.0, 2.0, 0.3714669328961067, 0.11979268013764235, 0.5087403869662525, 0.5343098072337717], [Timestamp('2019-05-16 00:00:00'), 2.0, 0.0, 0.5344239919631921, 0.08931274824144815, 0.37626325979536523, 0.5347681141263229], [Timestamp('2019-05-17 00:00:00'), 2.0, 0.0, 0.5331459573854233, 0.021916468275693504, 0.4449375743388883, 0.5349735227213653], [Timestamp('2019-05-20 00:00:00'), 2.0, 2.0, 0.46297393292681727, 0.029737579124288854, 0.5072884879489007, 0.5341459706638747], [Timestamp('2019-05-21 00:00:00'), 0.0, 0.0, 
0.46336857756757, 0.4038119655785368, 0.13281945685389152, 0.5322854647932047], [Timestamp('2019-05-22 00:00:00'), 0.0, 0.0, 0.6199748684130673, 0.0013695161881064149, 0.3786556153988309, 0.5355415898553154], [Timestamp('2019-05-23 00:00:00'), 2.0, 0.0, 0.5147834680170448, 0.00916735777008833, 0.4760491742128624, 0.5334539969834088], [Timestamp('2019-05-24 00:00:00'), 2.0, 0.0, 0.8112320069134119, 0.007210440986930166, 0.1815575520996585, 0.5341446972432888], [Timestamp('2019-05-27 00:00:00'), 2.0, 0.0, 0.571473961418051, 0.004434441804098657, 0.4240915967778444, 0.5304942052483036], [Timestamp('2019-05-28 00:00:00'), 0.0, 0.0, 0.5359787344347183, 0.030699738025768517, 0.4333215275395128, 0.5335322310088665], [Timestamp('2019-05-29 00:00:00'), 0.0, 2.0, 0.43133807104191046, 0.07340512496437375, 0.4952568039937121, 0.5316966064342935], [Timestamp('2019-05-30 00:00:00'), 0.0, 2.0, 0.3875338451662222, 0.22077584913697157, 0.3916903056968051, 0.5316179956924191], [Timestamp('2019-05-31 00:00:00'), 2.0, 2.0, 0.4346279917937171, 0.037044617072566, 0.5283273911337144, 0.5336293711607137], [Timestamp('2019-06-03 00:00:00'), 0.0, 2.0, 0.2826943144876359, 0.06090080074076035, 0.6564048847716095, 0.5318001781416416], [Timestamp('2019-06-04 00:00:00'), 1.0, 0.0, 0.4895636048349641, 0.034502032547808606, 0.47593436261722993, 0.534047512770917], [Timestamp('2019-06-05 00:00:00'), 2.0, 2.0, 0.23794731431790195, 0.00396512078403702, 0.7580875648980554, 0.5359554710483878], [Timestamp('2019-06-06 00:00:00'), 2.0, 0.0, 0.6502187860755576, 0.014085603069422298, 0.33569561085501537, 0.5384816270629159], [Timestamp('2019-06-07 00:00:00'), 2.0, 0.0, 0.6329747883347341, 0.17281911563348057, 0.19420609603179104, 0.5361159135250667], [Timestamp('2019-06-10 00:00:00'), 2.0, 2.0, 0.4452958844209101, 0.04075235204665349, 0.5139517635324388, 0.537084447797932], [Timestamp('2019-06-11 00:00:00'), 1.0, 2.0, 0.4231815821255931, 0.023435898667118624, 0.5533825192072871, 0.5350089947935729], [Timestamp('2019-06-12 00:00:00'), 2.0, 0.0, 0.4860275605955927, 0.1412518946367308, 0.37272054476767347, 0.5316640869408547], [Timestamp('2019-06-13 00:00:00'), 1.0, 0.0, 0.5827170628342772, 0.16114685707511614, 0.25613608009060324, 0.5333917540935386], [Timestamp('2019-06-14 00:00:00'), 2.0, 2.0, 0.2864403316439935, 0.019508796983468975, 0.6940508713725314, 0.5295564763268569], [Timestamp('2019-06-17 00:00:00'), 2.0, 2.0, 0.4354299784504899, 0.11679340304003918, 0.4477766185094777, 0.5279677096421207], [Timestamp('2019-06-18 00:00:00'), 2.0, 0.0, 0.47241174761014254, 0.13075519450520381, 0.39683305788464746, 0.5299369777431676], [Timestamp('2019-06-19 00:00:00'), 2.0, 2.0, 0.2652532718651525, 0.163316107824014, 0.5714306203108367, 0.5311390936083256], [Timestamp('2019-06-21 00:00:00'), 0.0, 2.0, 0.4649871250150695, 0.025271549658705483, 0.509741325326222, 0.5328492455018573], [Timestamp('2019-06-24 00:00:00'), 0.0, 0.0, 0.4520524576746232, 0.17285987525944385, 0.3750876670659339, 0.5320628917280362], [Timestamp('2019-06-25 00:00:00'), 0.0, 2.0, 0.36910363068724356, 0.048240083093940976, 0.5826562862188092, 0.5305018607335741], [Timestamp('2019-06-26 00:00:00'), 0.0, 2.0, 0.19571787847771727, 0.02418792299665739, 0.7800941985256243, 0.5314900732644768], [Timestamp('2019-06-27 00:00:00'), 1.0, 2.0, 0.2252457545580338, 0.022409248179665164, 0.7523449972622969, 0.5322003891258077], [Timestamp('2019-06-28 00:00:00'), 0.0, 2.0, 0.27385592123920643, 0.0174847727849193, 0.7086593059758791, 0.5251186702907384], [Timestamp('2019-07-01 
00:00:00'), 1.0, 2.0, 0.3795314717589858, 0.052897791906208136, 0.5675707363348053, 0.5258231726768091], [Timestamp('2019-07-02 00:00:00'), 2.0, 2.0, 0.3939994379326074, 0.024862989902270812, 0.581137572165116, 0.5168766279950415], [Timestamp('2019-07-03 00:00:00'), 2.0, 0.0, 0.7078422469022618, 0.011390710685827667, 0.28076704241191247, 0.5155936446124136], [Timestamp('2019-07-04 00:00:00'), 2.0, 0.0, 0.5970323265895298, 0.011328196840433782, 0.39163947657002957, 0.5140261645383931], [Timestamp('2019-07-05 00:00:00'), 2.0, 2.0, 0.3596562288220231, 0.017078954539073977, 0.623264816638896, 0.5094432068573287], [Timestamp('2019-07-08 00:00:00'), 2.0, 0.0, 0.4839804502131723, 0.10429845148703089, 0.41172109829979475, 0.5128949428189361], [Timestamp('2019-07-10 00:00:00'), 2.0, 0.0, 0.5164566834420188, 0.03709440972742012, 0.4464489068305556, 0.5115724383591598], [Timestamp('2019-07-11 00:00:00'), 0.0, 2.0, 0.4166943640212752, 0.025485152178865173, 0.5578204837998559, 0.5135226670289076], [Timestamp('2019-07-12 00:00:00'), 0.0, 2.0, 0.20232144133443633, 0.01756968254069927, 0.7801088761248662, 0.5165168675238806], [Timestamp('2019-07-15 00:00:00'), 0.0, 2.0, 0.2795396418227822, 0.03335416395673158, 0.6871061942204821, 0.5139696495118181], [Timestamp('2019-07-16 00:00:00'), 0.0, 2.0, 0.15736796980469758, 0.028785384722475008, 0.8138466454728334, 0.5139639210194429], [Timestamp('2019-07-17 00:00:00'), 0.0, 2.0, 0.13989931212589146, 0.26959690769064865, 0.5905037801834591, 0.5134073797187791], [Timestamp('2019-07-18 00:00:00'), 1.0, 2.0, 0.4237142937024933, 0.031179778452482765, 0.5451059278450253, 0.5143910570128717], [Timestamp('2019-07-19 00:00:00'), 1.0, 2.0, 0.30180926668759017, 0.014574356859779479, 0.6836163764526315, 0.5051223790161843], [Timestamp('2019-07-22 00:00:00'), 0.0, 2.0, 0.1402784215767108, 0.05135433802149397, 0.8083672404017994, 0.5085817728191492], [Timestamp('2019-07-23 00:00:00'), 0.0, 2.0, 0.17965230064128185, 0.1269149880028776, 0.6934327113558454, 0.5085320614732379], [Timestamp('2019-07-24 00:00:00'), 0.0, 2.0, 0.286435514509771, 0.2865224302423259, 0.4270420552479064, 0.5062920346601562], [Timestamp('2019-07-25 00:00:00'), 0.0, 2.0, 0.30799967875739187, 0.06880839921217301, 0.6231919220304366, 0.5065368774347102], [Timestamp('2019-07-26 00:00:00'), 2.0, 0.0, 0.6305487365034947, 0.020498681366835022, 0.3489525821296704, 0.5116437566017399], [Timestamp('2019-07-29 00:00:00'), 0.0, 0.0, 0.8060665205024041, 0.008155605575769337, 0.18577787392183107, 0.5076435710720886], [Timestamp('2019-07-30 00:00:00'), 1.0, 2.0, 0.195642391004773, 0.0127151509628489, 0.791642458032382, 0.5105711738575754], [Timestamp('2019-07-31 00:00:00'), 2.0, 2.0, 0.24144278978986933, 0.06464317228862425, 0.693914037921505, 0.5088189182126993], [Timestamp('2019-08-01 00:00:00'), 0.0, 0.0, 0.6824681335121351, 0.024717641790185773, 0.2928142246976838, 0.5084862863697986], [Timestamp('2019-08-02 00:00:00'), 0.0, 0.0, 0.8489105630458982, 0.03295282985539609, 0.1181366070986993, 0.505948946130404], [Timestamp('2019-08-05 00:00:00'), 1.0, 2.0, 0.3263407209672533, 0.022986299003877882, 0.6506729800288716, 0.5083253109345146], [Timestamp('2019-08-06 00:00:00'), 2.0, 0.0, 0.5734583520958194, 0.055435977065296814, 0.37110567083888957, 0.5136347579167552], [Timestamp('2019-08-07 00:00:00'), 2.0, 2.0, 0.3367225171827714, 0.01275434423352569, 0.6505231385836974, 0.5096698072192131], [Timestamp('2019-08-08 00:00:00'), 0.0, 0.0, 0.5244951247241354, 0.05186647837298389, 0.423638396902881, 0.5130684146125396], 
[Timestamp('2019-08-09 00:00:00'), 0.0, 0.0, 0.6168161775237153, 0.1476459854810453, 0.23553783699523256, 0.5089343879590327], [Timestamp('2019-08-12 00:00:00'), 0.0, 2.0, 0.4727753659991029, 0.004841368429556466, 0.5223832655713421, 0.5091594827586207], [Timestamp('2019-08-13 00:00:00'), 0.0, 0.0, 0.5929957060047985, 0.22793804090199019, 0.17906625309321275, 0.5045011413484143], [Timestamp('2019-08-14 00:00:00'), 0.0, 2.0, 0.3861944004650448, 0.03784383911102218, 0.5759617604239322, 0.5075947591680351], [Timestamp('2019-08-15 00:00:00'), 0.0, 2.0, 0.3606142403717272, 0.23399662682077313, 0.40538913280750616, 0.5036945081844507], [Timestamp('2019-08-16 00:00:00'), 2.0, 0.0, 0.6227605432050278, 0.004360556299497516, 0.37287890049546785, 0.5058279842762602], [Timestamp('2019-08-19 00:00:00'), 2.0, 0.0, 0.7258464465885646, 0.06661348916653903, 0.20754006424489055, 0.5050831834201007], [Timestamp('2019-08-20 00:00:00'), 2.0, 2.0, 0.21636061009176033, 0.07725386765144703, 0.7063855222567911, 0.5014965749448508], [Timestamp('2019-08-21 00:00:00'), 0.0, 2.0, 0.4264047549074992, 0.016371605454143217, 0.5572236396383528, 0.5035908921001436], [Timestamp('2019-08-22 00:00:00'), 0.0, 0.0, 0.613739147325742, 0.019140460758108293, 0.3671203919161529, 0.5040362092947526], [Timestamp('2019-08-23 00:00:00'), 1.0, 2.0, 0.42954832905226126, 0.060857968222549734, 0.5095937027251851, 0.5081079069119153], [Timestamp('2019-08-26 00:00:00'), 2.0, 2.0, 0.39852947188377147, 0.07995746898541854, 0.5215130591308096, 0.5025795365811901], [Timestamp('2019-08-27 00:00:00'), 2.0, 2.0, 0.38671129295096324, 0.09123597934569394, 0.522052727703337, 0.5046622668866557], [Timestamp('2019-08-28 00:00:00'), 2.0, 0.0, 0.6746718385864972, 0.04176134753560878, 0.2835668138778936, 0.5072741671967175], [Timestamp('2019-08-29 00:00:00'), 0.0, 0.0, 0.5815218881000694, 0.01702618262627784, 0.40145192927364726, 0.5084102806607099], [Timestamp('2019-08-30 00:00:00'), 2.0, 0.0, 0.5056833479114384, 0.10893524597580506, 0.3853814061127575, 0.5064352340179107], [Timestamp('2019-09-02 00:00:00'), 2.0, 2.0, 0.461085460712213, 0.07723169527139812, 0.46168284401639287, 0.504547799618222], [Timestamp('2019-09-03 00:00:00'), 2.0, 0.0, 0.7521853550559534, 0.09003627098520314, 0.15777837395884822, 0.5062041841693435], [Timestamp('2019-09-04 00:00:00'), 2.0, 0.0, 0.6209215008883928, 0.060856714345736806, 0.31822178476587537, 0.5049357413628378], [Timestamp('2019-09-05 00:00:00'), 2.0, 2.0, 0.4302193018261327, 0.03897356302721264, 0.5308071351466582, 0.5034616725095845], [Timestamp('2019-09-06 00:00:00'), 2.0, 2.0, 0.3265612647441505, 0.05240392463998916, 0.6210348106158557, 0.5015644338069083], [Timestamp('2019-09-09 00:00:00'), 1.0, 0.0, 0.49312966375249484, 0.035697936369653176, 0.4711723998778455, 0.50175546029802], [Timestamp('2019-09-10 00:00:00'), 1.0, 0.0, 0.5997369143478783, 0.09215236389224793, 0.3081107217598759, 0.5032073732084741], [Timestamp('2019-09-11 00:00:00'), 1.0, 2.0, 0.3349627483676061, 0.18841097748333696, 0.47662627414906067, 0.4941598058560704], [Timestamp('2019-09-12 00:00:00'), 2.0, 2.0, 0.4116684686279794, 0.04773273842943791, 0.5405987929425806, 0.49628417102391387], [Timestamp('2019-09-13 00:00:00'), 2.0, 2.0, 0.25107416845533215, 0.03838260896371988, 0.7105432225809417, 0.49553540163054083], [Timestamp('2019-09-16 00:00:00'), 0.0, 0.0, 0.5308145319312332, 0.05924758206807465, 0.409937886000698, 0.4940643081488152], [Timestamp('2019-09-17 00:00:00'), 0.0, 2.0, 0.1383981384076606, 0.05185187366752379, 0.8097499879248108, 
0.4973269551921237], [Timestamp('2019-09-18 00:00:00'), 0.0, 2.0, 0.113724951352581, 0.01582434396875257, 0.8704507046786631, 0.4970487029310558], [Timestamp('2019-09-19 00:00:00'), 2.0, 2.0, 0.09884548595381952, 0.00532681073611644, 0.895827703310071, 0.49490577702868205], [Timestamp('2019-09-20 00:00:00'), 2.0, 2.0, 0.20908089536466956, 0.13988914187185938, 0.651029962763468, 0.4960301070198871], [Timestamp('2019-09-23 00:00:00'), 0.0, 0.0, 0.6048696413593379, 0.08858550881573139, 0.3065448498249239, 0.4955041146165207], [Timestamp('2019-09-24 00:00:00'), 2.0, 0.0, 0.5569258919745741, 0.0276602202877046, 0.4154138877377171, 0.4994489309876881], [Timestamp('2019-09-25 00:00:00'), 2.0, 2.0, 0.35214281910979517, 0.031148397276649923, 0.6167087836135611, 0.497783491655358], [Timestamp('2019-09-26 00:00:00'), 0.0, 2.0, 0.24713055547400076, 0.12555636589576016, 0.6273130786302388, 0.4994048555084815], [Timestamp('2019-09-27 00:00:00'), 0.0, 2.0, 0.1467880821752167, 0.025867353076205725, 0.8273445647485806, 0.49676888839978967], [Timestamp('2019-09-30 00:00:00'), 0.0, 2.0, 0.2348022941955969, 0.24683019672929424, 0.5183675090751142, 0.49741297345116015], [Timestamp('2019-10-01 00:00:00'), 0.0, 0.0, 0.5182153296151942, 0.023064337187957605, 0.4587203331968496, 0.49620792741984027], [Timestamp('2019-10-02 00:00:00'), 0.0, 2.0, 0.23311831693794954, 0.25567671341261766, 0.5112049696494326, 0.49623322877322096], [Timestamp('2019-10-03 00:00:00'), 0.0, 2.0, 0.15802357949114726, 0.018182301052935543, 0.8237941194559102, 0.49635271738276027], [Timestamp('2019-10-04 00:00:00'), 0.0, 2.0, 0.18603410406832652, 0.006623141008883773, 0.8073427549227947, 0.4958659894987722], [Timestamp('2019-10-07 00:00:00'), 2.0, 2.0, 0.47788491765124763, 0.01979721840308218, 0.5023178639456761, 0.49811416176715556], [Timestamp('2019-10-08 00:00:00'), 2.0, 2.0, 0.25438812137278055, 0.03302523986537366, 0.712586638761851, 0.4981132494384153], [Timestamp('2019-10-09 00:00:00'), 2.0, 0.0, 0.6089601630135636, 0.08672402701929381, 0.30431580996714924, 0.49688244360375505], [Timestamp('2019-10-10 00:00:00'), 2.0, 0.0, 0.7640103434672605, 0.029762465734855494, 0.20622719079788193, 0.497989097328466], [Timestamp('2019-10-11 00:00:00'), 2.0, 2.0, 0.4298212691880138, 0.018681622915591823, 0.551497107896394, 0.49798264364012534], [Timestamp('2019-10-14 00:00:00'), 2.0, 0.0, 0.49631135795562165, 0.02769820295339557, 0.47599043909098854, 0.49888309687829374], [Timestamp('2019-10-15 00:00:00'), 1.0, 0.0, 0.571827143977294, 0.037116576691738984, 0.39105627933096104, 0.5002088152963396], [Timestamp('2019-10-16 00:00:00'), 0.0, 2.0, 0.39782612049564203, 0.018085750464915146, 0.5840881290394472, 0.49862666494852975], [Timestamp('2019-10-17 00:00:00'), 2.0, 2.0, 0.24233557578456852, 0.055452016452015265, 0.7022124077634175, 0.5031907249857267], [Timestamp('2019-10-18 00:00:00'), 2.0, 2.0, 0.363410273110319, 0.03944817035115928, 0.5971415565385154, 0.5008590908214287], [Timestamp('2019-10-21 00:00:00'), 2.0, 2.0, 0.2477898580918224, 0.04785534019663335, 0.7043548017115472, 0.5026608954918487], [Timestamp('2019-10-22 00:00:00'), 0.0, 0.0, 0.6073101126757827, 0.13655102855710305, 0.25613885876711656, 0.5058624466628009], [Timestamp('2019-10-23 00:00:00'), 2.0, 0.0, 0.7467256500121634, 0.06049721276704321, 0.19277713722079073, 0.5048760488176964], [Timestamp('2019-10-24 00:00:00'), 2.0, 0.0, 0.6864130664020491, 0.11570381506896439, 0.1978831185289836, 0.5041560102301791], [Timestamp('2019-10-25 00:00:00'), 2.0, 0.0, 0.5297866383848752, 
0.2907972924284697, 0.17941606918664915, 0.5039270197186522], [Timestamp('2019-10-28 00:00:00'), 2.0, 2.0, 0.3704824258763823, 0.0903128224838476, 0.5392047516397735, 0.5066172457704202], [Timestamp('2019-10-29 00:00:00'), 2.0, 2.0, 0.3262819186164887, 0.26457275264334507, 0.4091453287401622, 0.5008755408308372], [Timestamp('2019-10-30 00:00:00'), 2.0, 0.0, 0.48774916035535576, 0.19267156530613952, 0.319579274338509, 0.5010567632850242], [Timestamp('2019-10-31 00:00:00'), 1.0, 0.0, 0.6341669728603746, 0.08759009260965994, 0.27824293452996707, 0.5055534213142908], [Timestamp('2019-11-01 00:00:00'), 0.0, 2.0, 0.43798278690976217, 0.07096156459393636, 0.491055648496302, 0.5002062964896065], [Timestamp('2019-11-04 00:00:00'), 0.0, 2.0, 0.39416306572639437, 0.03352872178131757, 0.5723082124922952, 0.4995148268161644], [Timestamp('2019-11-05 00:00:00'), 2.0, 2.0, 0.33533239308160084, 0.04718317796906503, 0.6174844289493405, 0.5000782420137259], [Timestamp('2019-11-06 00:00:00'), 2.0, 2.0, 0.19165733883232391, 0.03711555376993976, 0.7712271073977378, 0.49984359945543194], [Timestamp('2019-11-07 00:00:00'), 0.0, 0.0, 0.4727515237446117, 0.22908960273066442, 0.2981588735247244, 0.5029953014525245], [Timestamp('2019-11-08 00:00:00'), 2.0, 0.0, 0.4384355797609333, 0.20856462724816024, 0.3529997929909073, 0.5049991452119055], [Timestamp('2019-11-11 00:00:00'), 0.0, 2.0, 0.40102565484351493, 0.06724680469830205, 0.5317275404581767, 0.5035859260531423], [Timestamp('2019-11-12 00:00:00'), 0.0, 2.0, 0.30282816811567626, 0.05620626981378414, 0.640965562070534, 0.509167925590213], [Timestamp('2019-11-13 00:00:00'), 0.0, 0.0, 0.5605140823844801, 0.15872205376773296, 0.2807638638477915, 0.5055083315286314], [Timestamp('2019-11-14 00:00:00'), 0.0, 2.0, 0.3303519222252471, 0.13716934212827436, 0.5324787356464804, 0.5054260040459799], [Timestamp('2019-11-18 00:00:00'), 2.0, 0.0, 0.5342456447846674, 0.017605952729635947, 0.4481484024856946, 0.5078319263011345], [Timestamp('2019-11-19 00:00:00'), 2.0, 2.0, 0.3027617611989993, 0.007614531161342628, 0.6896237076396571, 0.5089011565606179], [Timestamp('2019-11-21 00:00:00'), 0.0, 0.0, 0.6442807539588122, 0.01533515076417474, 0.3403840952770173, 0.5081918668967654], [Timestamp('2019-11-22 00:00:00'), 0.0, 0.0, 0.7118864094563273, 0.015893278235712675, 0.2722203123079553, 0.5070189982045901], [Timestamp('2019-11-25 00:00:00'), 0.0, 0.0, 0.6505194556821227, 0.13295974512941441, 0.2165207991884611, 0.5047187013157433], [Timestamp('2019-11-26 00:00:00'), 2.0, 0.0, 0.5533799833504128, 0.10889008078068108, 0.3377299358689046, 0.5039382273551927], [Timestamp('2019-11-27 00:00:00'), 0.0, 0.0, 0.48752118119833876, 0.05184549014923488, 0.4606333286524232, 0.5030435167106911], [Timestamp('2019-11-28 00:00:00'), 0.0, 2.0, 0.46062455892849763, 0.03660524218472223, 0.5027701988867839, 0.5016800474492449], [Timestamp('2019-11-29 00:00:00'), 0.0, 2.0, 0.28188651959908845, 0.03718354052881796, 0.6809299398720905, 0.5036397959859574], [Timestamp('2019-12-02 00:00:00'), 2.0, 2.0, 0.3839408429106663, 0.004833787260581502, 0.6112253698287534, 0.5023609645667843], [Timestamp('2019-12-03 00:00:00'), 2.0, 2.0, 0.4063959981901946, 0.09584759727446787, 0.4977564045353325, 0.5016740206522307], [Timestamp('2019-12-04 00:00:00'), 2.0, 0.0, 0.7688302385122183, 0.08219361737879817, 0.1489761441089882, 0.5021564071758], [Timestamp('2019-12-05 00:00:00'), 2.0, 0.0, 0.6246818903726147, 0.0342792967238577, 0.3410388129035283, 0.5016583756523522], [Timestamp('2019-12-06 00:00:00'), 1.0, 0.0, 
0.7024166684935088, 0.03395316611639958, 0.2636301653900913, 0.498733704867274], [Timestamp('2019-12-09 00:00:00'), 2.0, 2.0, 0.21897813803009986, 0.02119260974524493, 0.7598292522246589, 0.49773696875661916], [Timestamp('2019-12-10 00:00:00'), 2.0, 0.0, 0.4200550980328066, 0.27911899417233044, 0.3008259077948624, 0.49724298214669194], [Timestamp('2019-12-11 00:00:00'), 0.0, 2.0, 0.2742824588690395, 0.14121077421779973, 0.5845067669131566, 0.499853045860528], [Timestamp('2019-12-12 00:00:00'), 0.0, 0.0, 0.5119626874349988, 0.23674761448581924, 0.25128969807918017, 0.5018536967321566], [Timestamp('2019-12-13 00:00:00'), 0.0, 1.0, 0.12040612195150505, 0.582513623940273, 0.29708025410821615, 0.5024776251067714], [Timestamp('2019-12-16 00:00:00'), 2.0, 2.0, 0.2703499691023708, 0.1802330761870935, 0.5494169547105323, 0.504991946949971], [Timestamp('2019-12-17 00:00:00'), 2.0, 0.0, 0.525011713388587, 0.1930977504386243, 0.28189053617278903, 0.5025670785836659], [Timestamp('2019-12-18 00:00:00'), 0.0, 0.0, 0.6233123860586336, 0.16842075852531183, 0.20826685541605744, 0.5034190531198509], [Timestamp('2019-12-19 00:00:00'), 0.0, 2.0, 0.31052534766338924, 0.15526320331421187, 0.5342114490224047, 0.5046749153132132], [Timestamp('2019-12-20 00:00:00'), 2.0, 2.0, 0.155783055436198, 0.013416332835825802, 0.8308006117279815, 0.5057341132636028], [Timestamp('2019-12-23 00:00:00'), 2.0, 0.0, 0.5013736277981479, 0.1492333274227274, 0.34939304477911803, 0.5061982650606979], [Timestamp('2019-12-26 00:00:00'), 0.0, 0.0, 0.5918745520309632, 0.05328995353665906, 0.35483549443237256, 0.5050272605389438], [Timestamp('2019-12-27 00:00:00'), 2.0, 0.0, 0.5350956133603509, 0.11912118103059235, 0.3457832056090636, 0.5076182349286585], [Timestamp('2019-12-30 00:00:00'), 2.0, 2.0, 0.3621445524294115, 0.0926767341752851, 0.5451787133952998, 0.5088412366271949], [Timestamp('2020-01-02 00:00:00'), 2.0, 0.0, 0.5027390318303945, 0.05847585515623851, 0.4387851130133669, 0.5086249431808437], [Timestamp('2020-01-03 00:00:00'), 2.0, 2.0, 0.3750805436992049, 0.007302834508358771, 0.6176166217924387, 0.5079306174061246], [Timestamp('2020-01-06 00:00:00'), 0.0, 2.0, 0.41479899458011515, 0.05885723144649194, 0.5263437739733959, 0.5074316154101166], [Timestamp('2020-01-07 00:00:00'), 0.0, 2.0, 0.15022731521429689, 0.02891028885269521, 0.8208623959330141, 0.5106729948744756], [Timestamp('2020-01-08 00:00:00'), 0.0, 2.0, 0.2793962325817477, 0.029196909849941464, 0.6914068575683143, 0.5090590574034342], [Timestamp('2020-01-09 00:00:00'), 1.0, 2.0, 0.428044933226857, 0.05386545737340146, 0.5180896093997414, 0.5085844677257596], [Timestamp('2020-01-10 00:00:00'), 0.0, 2.0, 0.32125036840052634, 0.04906079875988344, 0.6296888328395887, 0.5054258554416134], [Timestamp('2020-01-13 00:00:00'), 0.0, 2.0, 0.3391681989130122, 0.007776397366225355, 0.6530554037207583, 0.5029620572199702], [Timestamp('2020-01-14 00:00:00'), 0.0, 2.0, 0.22168217091869427, 0.009160081039336515, 0.7691577480419712, 0.49965030554715845], [Timestamp('2020-01-15 00:00:00'), 2.0, 2.0, 0.328357678265103, 0.032664003153521214, 0.6389783185813699, 0.5013675341161791], [Timestamp('2020-01-16 00:00:00'), 2.0, 2.0, 0.29055356375444646, 0.11956382374985225, 0.5898826124957035, 0.5020263039623124], [Timestamp('2020-01-17 00:00:00'), 0.0, 2.0, 0.34442620491029863, 0.008207502282648337, 0.6473662928070543, 0.5032355396658929], [Timestamp('2020-01-20 00:00:00'), 0.0, 0.0, 0.5867821896094984, 0.025542213802524003, 0.38767559658798423, 0.5015621649920585], [Timestamp('2020-01-21 
00:00:00'), 1.0, 0.0, 0.49557938189791473, 0.019609602916976106, 0.48481101518511366, 0.5058111468872751], [Timestamp('2020-01-22 00:00:00'), 1.0, 0.0, 0.5258572989131911, 0.015247646793532152, 0.4588950542932719, 0.49758110968506825], [Timestamp('2020-01-23 00:00:00'), 0.0, 0.0, 0.7323336464337648, 0.01510793782153003, 0.2525584157447061, 0.4977699681246118], [Timestamp('2020-01-24 00:00:00'), 0.0, 2.0, 0.35851751625015343, 0.006135551869871237, 0.6353469318799706, 0.49851593345270456], [Timestamp('2020-01-27 00:00:00'), 2.0, 2.0, 0.4486996899203205, 0.05612319619670325, 0.4951771138829749, 0.4987657133019656], [Timestamp('2020-01-28 00:00:00'), 2.0, 0.0, 0.5845306054283022, 0.029400880906349538, 0.38606851366534856, 0.496250017689811], [Timestamp('2020-01-29 00:00:00'), 0.0, 0.0, 0.5602851499781893, 0.164423030071746, 0.27529181995006285, 0.4966090049499379], [Timestamp('2020-01-30 00:00:00'), 0.0, 0.0, 0.6439174110370456, 0.050456436020583, 0.30562615294236956, 0.4954911504205386], [Timestamp('2020-01-31 00:00:00'), 2.0, 2.0, 0.4569903141725832, 0.06280231579747514, 0.48020737002993963, 0.4992127660173195], [Timestamp('2020-02-03 00:00:00'), 2.0, 0.0, 0.4170735021947549, 0.22135933391050197, 0.36156716389473653, 0.49772324472128665], [Timestamp('2020-02-04 00:00:00'), 2.0, 2.0, 0.3860146586539625, 0.051214175223699934, 0.5627711661223413, 0.5005337007090002], [Timestamp('2020-02-05 00:00:00'), 2.0, 0.0, 0.48881982255093903, 0.047464195027445116, 0.4637159824216132, 0.5000522520143473], [Timestamp('2020-02-06 00:00:00'), 1.0, 0.0, 0.6392586425299783, 0.10794268831764606, 0.25279866915237326, 0.5013567885646915], [Timestamp('2020-02-07 00:00:00'), 2.0, 0.0, 0.47842075706003356, 0.0880304365856975, 0.43354880635427245, 0.5002593254583741], [Timestamp('2020-02-10 00:00:00'), 2.0, 0.0, 0.609799979368661, 0.14587768080819408, 0.2443223398231508, 0.49828492800629026], [Timestamp('2020-02-11 00:00:00'), 2.0, 0.0, 0.4982773490980003, 0.1581968278127378, 0.34352582308925517, 0.5004119888763003], [Timestamp('2020-02-12 00:00:00'), 0.0, 2.0, 0.46832085769928516, 0.0362071783283012, 0.4954719639724088, 0.4974203085839599], [Timestamp('2020-02-13 00:00:00'), 0.0, 2.0, 0.29891418073837794, 0.021160798224588703, 0.6799250210370337, 0.49817336309523813], [Timestamp('2020-02-14 00:00:00'), 2.0, 2.0, 0.2853023357199148, 0.03603949441927239, 0.678658169860818, 0.49642240195641846], [Timestamp('2020-02-17 00:00:00'), 2.0, 2.0, 0.12005640291662636, 0.04868179244699908, 0.8312618046363791, 0.49025929758621833], [Timestamp('2020-02-18 00:00:00'), 2.0, 0.0, 0.5704621878712012, 0.017866304697682477, 0.41167150743111197, 0.49172444626278083], [Timestamp('2020-02-19 00:00:00'), 0.0, 0.0, 0.5452731087699204, 0.010333785066825701, 0.4443931061632583, 0.4931650136266888], [Timestamp('2020-02-20 00:00:00'), 0.0, 2.0, 0.29293063905549893, 0.17875265029454285, 0.52831671064996, 0.49502812862574386], [Timestamp('2020-02-21 00:00:00'), 0.0, 2.0, 0.23343394660463793, 0.0874530799316395, 0.6791129734637208, 0.49476358131004455], [Timestamp('2020-02-27 00:00:00'), 2.0, 2.0, 0.12832134583713461, 0.031165516153967458, 0.8405131380088929, 0.49202189190585394], [Timestamp('2020-02-28 00:00:00'), 2.0, 2.0, 0.21765026271172283, 0.02065158399547185, 0.7616981532927989, 0.49283597083011577], [Timestamp('2020-03-02 00:00:00'), 2.0, 2.0, 0.4758240626889997, 0.022251513104631866, 0.5019244242063629, 0.49253774704435277], [Timestamp('2020-03-03 00:00:00'), 0.0, 2.0, 0.4356909722203613, 0.043314393315737144, 0.5209946344639014, 
0.49950622475374945], …, [Timestamp('2021-05-25 00:00:00'), 0.0, 0.0, 0.668508106910744, 0.035803773741351, 0.2956881193479016, 0.46306348057694424]]

(Output abridged: the list above holds one record per business day through 2021-05-25. Each record has the form [Timestamp, label, label, p0, p1, p2, score]: both labels take values 0.0, 1.0, or 2.0; the three probabilities sum to ≈1; the second label always equals the argmax of the three probabilities, which suggests it is a predicted class printed next to an actual class; the trailing score drifts slowly, staying roughly between 0.46 and 0.61.)
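For reference, a minimal sketch of turning records in this layout back into a pandas DataFrame and scoring them. The column names (y_true, y_pred, p0–p2, score) and the actual-versus-predicted reading are assumptions made here, not something the printed output states:

import pandas as pd

# Two sample records copied from the output above; the full lists are abridged.
records = [
    [pd.Timestamp('2020-03-04'), 0.0, 2.0, 0.380513725922848, 0.014851131035548311, 0.6046351430416025, 0.4939118256578574],
    [pd.Timestamp('2020-03-05'), 0.0, 2.0, 0.19749509692623496, 0.006732602364849834, 0.7957723007089129, 0.4942972529179426],
]

# Assumed column semantics: y_true = actual class, y_pred = predicted class.
cols = ['date', 'y_true', 'y_pred', 'p0', 'p1', 'p2', 'score']
df = pd.DataFrame(records, columns=cols).set_index('date')

# Consistency checks implied by the printed data:
# probabilities sum to ~1, and the predicted label is their argmax.
assert ((df[['p0', 'p1', 'p2']].sum(axis=1) - 1).abs() < 1e-9).all()
assert (df[['p0', 'p1', 'p2']].values.argmax(axis=1) == df['y_pred']).all()

print('accuracy:', (df['y_true'] == df['y_pred']).mean())

The same pattern scales to either full list, and with the timestamp as the index, date-range slicing such as df.loc['2018'] or df.loc['2020-03':] comes for free.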
(Second output list, same record layout, abridged:)

[[Timestamp('2018-01-26 00:00:00'), 1.0, 1.0, 0.014384505310212813, 0.9759024681192427, 0.009713026570547938, 0.6021432177294971], …, [Timestamp('2019-02-07 00:00:00'), 2.0, 0.0, 0.49667842695799885,
0.012906897044595581, 0.49041467599739946, 0.535033404598622], [Timestamp('2019-02-08 00:00:00'), 2.0, 2.0, 0.325396447027489, 0.07265156979960592, 0.6019519831729077, 0.5363660959260749], [Timestamp('2019-02-11 00:00:00'), 2.0, 0.0, 0.5442051999255846, 0.015919284163954227, 0.43987551591046786, 0.5341958765871809], [Timestamp('2019-02-12 00:00:00'), 2.0, 2.0, 0.44056833149401453, 0.0500031296208629, 0.5094285388851292, 0.5378685541338027], [Timestamp('2019-02-13 00:00:00'), 2.0, 0.0, 0.5148151384106856, 0.231020373267866, 0.2541644883214511, 0.5370636228759799], [Timestamp('2019-02-14 00:00:00'), 2.0, 0.0, 0.6862526400951058, 0.03773822131170416, 0.2760091385931928, 0.5312442736952439], [Timestamp('2019-02-15 00:00:00'), 2.0, 0.0, 0.5268351981454866, 0.10727760326421378, 0.3658871985903012, 0.5314791488704532], [Timestamp('2019-02-18 00:00:00'), 1.0, 2.0, 0.4199664272436433, 0.08124313915521696, 0.4987904336011363, 0.5282860964984274], [Timestamp('2019-02-19 00:00:00'), 0.0, 0.0, 0.4470452088203725, 0.33964996980994594, 0.21330482136968174, 0.5151914163842285], [Timestamp('2019-02-20 00:00:00'), 1.0, 0.0, 0.5168582215208004, 0.0027599871927245093, 0.4803817912864723, 0.516386309529134], [Timestamp('2019-02-21 00:00:00'), 0.0, 0.0, 0.5556609331303549, 0.14105154341034296, 0.3032875234593073, 0.5212279801703968], [Timestamp('2019-02-22 00:00:00'), 0.0, 2.0, 0.37167857616090294, 0.10084503613138454, 0.5274763877077155, 0.526100070963435], [Timestamp('2019-02-25 00:00:00'), 1.0, 0.0, 0.5909861945840733, 0.08260934633484425, 0.3264044590810868, 0.5223827769157748], [Timestamp('2019-02-26 00:00:00'), 1.0, 0.0, 0.6891916524042541, 0.027705899226677412, 0.2831024483690641, 0.5301411201167433], [Timestamp('2019-02-27 00:00:00'), 2.0, 0.0, 0.7557555656075827, 0.12015255661261004, 0.12409187777980984, 0.5269476067320915], [Timestamp('2019-02-28 00:00:00'), 2.0, 0.0, 0.4526163069358313, 0.19150214378147248, 0.35588154928269156, 0.5276943283861312], [Timestamp('2019-03-01 00:00:00'), 2.0, 2.0, 0.3597546019215521, 0.03349666001194138, 0.6067487380665045, 0.5274155533262826], [Timestamp('2019-03-07 00:00:00'), 2.0, 0.0, 0.5508367799878084, 0.06223653721455924, 0.3869266827976376, 0.5270135095994704], [Timestamp('2019-03-08 00:00:00'), 2.0, 0.0, 0.718095514681906, 0.0668279228784283, 0.2150765624396668, 0.5259389512383902], [Timestamp('2019-03-11 00:00:00'), 2.0, 1.0, 0.25660949661934374, 0.5392903713654252, 0.20410013201522528, 0.5246004627913682], [Timestamp('2019-03-12 00:00:00'), 2.0, 0.0, 0.5308119454613321, 0.08278710808974953, 0.3864009464489186, 0.5243080806524508], [Timestamp('2019-03-13 00:00:00'), 2.0, 0.0, 0.5229307948164972, 0.29234428877296126, 0.1847249164105403, 0.520246659288408], [Timestamp('2019-03-14 00:00:00'), 2.0, 1.0, 0.20168246245163185, 0.4343712019734807, 0.36394633557488676, 0.518394502975095], [Timestamp('2019-03-15 00:00:00'), 0.0, 2.0, 0.3507388283551212, 0.06538338405928965, 0.5838777875855872, 0.5160180156058751], [Timestamp('2019-03-18 00:00:00'), 0.0, 1.0, 0.06744955992226102, 0.8057433389847704, 0.12680710109296855, 0.5228047949935467], [Timestamp('2019-03-19 00:00:00'), 0.0, 2.0, 0.325717252838723, 0.15118601222782532, 0.5230967349334497, 0.5209982320141439], [Timestamp('2019-03-20 00:00:00'), 0.0, 2.0, 0.1468809971395343, 0.09479922757053906, 0.7583197752899299, 0.5154800892866988], [Timestamp('2019-03-21 00:00:00'), 0.0, 2.0, 0.1761439885012218, 0.0360705021137374, 0.7877855093850359, 0.5166650974484512], [Timestamp('2019-03-22 00:00:00'), 2.0, 2.0, 
0.20202809346479436, 0.12846807726197934, 0.6695038292732316, 0.5201606663008765], [Timestamp('2019-03-25 00:00:00'), 2.0, 2.0, 0.18514961934375834, 0.09313825598114094, 0.7217121246751, 0.5178312120720144], [Timestamp('2019-03-26 00:00:00'), 0.0, 2.0, 0.2529955066872709, 0.11850134665319495, 0.6285031466595354, 0.5221524836996706], [Timestamp('2019-03-27 00:00:00'), 1.0, 2.0, 0.2279259937522441, 0.02218750178815566, 0.7498865044596018, 0.5166789429043347], [Timestamp('2019-03-28 00:00:00'), 2.0, 2.0, 0.2009280652637436, 0.3611353142681413, 0.437936620468117, 0.5253157101121477], [Timestamp('2019-03-29 00:00:00'), 2.0, 2.0, 0.15631585348000862, 0.18151732820405797, 0.6621668183159387, 0.522066237583422], [Timestamp('2019-04-01 00:00:00'), 2.0, 2.0, 0.17858583198721897, 0.15202862254233113, 0.6693855454704434, 0.5250963237219701], [Timestamp('2019-04-02 00:00:00'), 2.0, 2.0, 0.42650062538166855, 0.10254050812045623, 0.47095886649788093, 0.523113538650522], [Timestamp('2019-04-03 00:00:00'), 2.0, 2.0, 0.23641360788130866, 0.008705233176511097, 0.754881158942186, 0.5283309891760238], [Timestamp('2019-04-04 00:00:00'), 0.0, 2.0, 0.34804378792132684, 0.28337240103105543, 0.3685838110476142, 0.5237981902981815], [Timestamp('2019-04-05 00:00:00'), 0.0, 0.0, 0.42180683879449377, 0.1661780225194622, 0.41201513868604334, 0.523702989909888], [Timestamp('2019-04-08 00:00:00'), 0.0, 2.0, 0.35758337535926654, 0.27872062216739574, 0.3636960024733367, 0.520893286411021], [Timestamp('2019-04-09 00:00:00'), 0.0, 0.0, 0.47371279077841505, 0.10439491823720597, 0.4218922909843788, 0.5202929433578477], [Timestamp('2019-04-10 00:00:00'), 0.0, 2.0, 0.375173453051308, 0.14582713292941357, 0.4789994140192851, 0.5222033798263775], [Timestamp('2019-04-11 00:00:00'), 0.0, 2.0, 0.19470400812464797, 0.11731452943230199, 0.6879814624430562, 0.5240995059093753], [Timestamp('2019-04-12 00:00:00'), 2.0, 2.0, 0.383217827282951, 0.009361053587653663, 0.6074211191293891, 0.5279281510069788], [Timestamp('2019-04-15 00:00:00'), 2.0, 0.0, 0.4287484286930737, 0.2106395858551419, 0.36061198545178946, 0.5271786046934278], [Timestamp('2019-04-16 00:00:00'), 2.0, 0.0, 0.5405011810723487, 0.1159956225151145, 0.34350319641253996, 0.5273900996158161], [Timestamp('2019-04-17 00:00:00'), 2.0, 2.0, 0.3258159934301784, 0.15881614644848996, 0.515367860121335, 0.524128410975426], [Timestamp('2019-04-18 00:00:00'), 0.0, 0.0, 0.6376297042611226, 0.02862602666506841, 0.33374426907381416, 0.5258497055538633], [Timestamp('2019-04-22 00:00:00'), 1.0, 0.0, 0.48939113554624003, 0.08641662768421882, 0.4241922367695454, 0.5212312826824245], [Timestamp('2019-04-23 00:00:00'), 0.0, 1.0, 0.3559758756824424, 0.3930943793284014, 0.25092974498915127, 0.5270978083828821], [Timestamp('2019-04-24 00:00:00'), 0.0, 0.0, 0.4690063481884239, 0.12582285718256583, 0.4051707946290046, 0.5306751031836942], [Timestamp('2019-04-25 00:00:00'), 0.0, 0.0, 0.5042265707066967, 0.20894237875577457, 0.28683105053753477, 0.5325246772349903], [Timestamp('2019-04-26 00:00:00'), 0.0, 2.0, 0.45283689565899243, 0.05180897974927169, 0.4953541245917354, 0.5296420973887837], [Timestamp('2019-04-29 00:00:00'), 0.0, 2.0, 0.36944470264229357, 0.1593379103826082, 0.471217386975099, 0.5319387425425717], [Timestamp('2019-04-30 00:00:00'), 2.0, 2.0, 0.395895693783166, 0.05662923972596129, 0.5474750664908724, 0.5258628078952085], [Timestamp('2019-05-02 00:00:00'), 1.0, 2.0, 0.38402172774324245, 0.18570073466274853, 0.43027753759401344, 0.5276002550108069], [Timestamp('2019-05-03 00:00:00'), 1.0, 
2.0, 0.21102442815033676, 0.0890122305536074, 0.6999633412960501, 0.5357527626435189], [Timestamp('2019-05-06 00:00:00'), 0.0, 0.0, 0.4353323876905151, 0.18941223744772104, 0.3752553748617595, 0.5335565472963498], [Timestamp('2019-05-07 00:00:00'), 0.0, 2.0, 0.33131789474664897, 0.1965286587995717, 0.4721534464537775, 0.5329372152901565], [Timestamp('2019-05-08 00:00:00'), 0.0, 1.0, 0.34855334658311926, 0.39916992654725514, 0.25227672686962155, 0.5363991412584763], [Timestamp('2019-05-09 00:00:00'), 0.0, 0.0, 0.5297040919716828, 0.03760379696366103, 0.43269211106465366, 0.5350171485232007], [Timestamp('2019-05-10 00:00:00'), 0.0, 2.0, 0.3492246509415153, 0.20317874558303756, 0.4475966034754488, 0.5329800056841952], [Timestamp('2019-05-13 00:00:00'), 0.0, 0.0, 0.4709328340390929, 0.20734012291372858, 0.32172704304717303, 0.5342155560371878], [Timestamp('2019-05-14 00:00:00'), 2.0, 0.0, 0.5304639982527782, 0.08543202852744383, 0.38410397321978285, 0.5364141305317776], [Timestamp('2019-05-15 00:00:00'), 2.0, 0.0, 0.4956873132962777, 0.13471531215280075, 0.3695973745509169, 0.538147138372872], [Timestamp('2019-05-16 00:00:00'), 2.0, 0.0, 0.37898189159532175, 0.3714777790394714, 0.24954032936520898, 0.5395613145613146], [Timestamp('2019-05-17 00:00:00'), 2.0, 0.0, 0.39168313648128783, 0.36365100793198557, 0.24466585558673226, 0.5374030567850793], [Timestamp('2019-05-20 00:00:00'), 2.0, 0.0, 0.753368353991931, 0.07829762108038175, 0.1683340249276895, 0.5395626131052139], [Timestamp('2019-05-21 00:00:00'), 2.0, 1.0, 0.25851967409749577, 0.6476213042024117, 0.09385902170009726, 0.5417178334963569], [Timestamp('2019-05-22 00:00:00'), 2.0, 0.0, 0.5773432844166926, 0.08952036389078458, 0.33313635169252664, 0.540446127946128], [Timestamp('2019-05-23 00:00:00'), 2.0, 0.0, 0.5449747863912835, 0.053545394355989144, 0.40147981925272397, 0.5382839156224457], [Timestamp('2019-05-24 00:00:00'), 0.0, 0.0, 0.5205851948894741, 0.16163251559843478, 0.3177822895120973, 0.5396921596921597], [Timestamp('2019-05-27 00:00:00'), 0.0, 0.0, 0.5138488813651498, 0.2093062709145963, 0.2768448477202607, 0.5354029020932934], [Timestamp('2019-05-28 00:00:00'), 0.0, 1.0, 0.28706799571749986, 0.47480811544374113, 0.2381238888387527, 0.5348133820474246], [Timestamp('2019-05-29 00:00:00'), 0.0, 2.0, 0.2846310241846949, 0.23199124104103636, 0.48337773477426804, 0.5306532938335059], [Timestamp('2019-05-30 00:00:00'), 1.0, 2.0, 0.17245955924768305, 0.20598824544305436, 0.6215521953092605, 0.5288699198558353], [Timestamp('2019-05-31 00:00:00'), 2.0, 2.0, 0.23243711648927604, 0.08157222005212991, 0.6859906634585965, 0.5321022430881586], [Timestamp('2019-06-03 00:00:00'), 2.0, 2.0, 0.20477092596054453, 0.24658702086599146, 0.5486420531734579, 0.5435537095447779], [Timestamp('2019-06-04 00:00:00'), 2.0, 1.0, 0.19222571247413348, 0.5513090925460624, 0.2564651949798061, 0.5373497029373883], [Timestamp('2019-06-05 00:00:00'), 2.0, 0.0, 0.46095485390812946, 0.11736164692191003, 0.4216834991699584, 0.5397242172682897], [Timestamp('2019-06-06 00:00:00'), 2.0, 0.0, 0.4184101868394415, 0.37560339556918937, 0.20598641759137054, 0.542285578374801], [Timestamp('2019-06-07 00:00:00'), 2.0, 0.0, 0.7466490285481999, 0.09715607717363492, 0.15619489427816552, 0.5436722901511634], [Timestamp('2019-06-10 00:00:00'), 2.0, 0.0, 0.437858024034397, 0.3576853780201088, 0.20445659794548737, 0.5336965653570546], [Timestamp('2019-06-11 00:00:00'), 2.0, 1.0, 0.3750751906454128, 0.47239652804309656, 0.15252828131149318, 0.5352692082472955], [Timestamp('2019-06-12 
00:00:00'), 2.0, 0.0, 0.48159909035372045, 0.1634267126902875, 0.3549741969559907, 0.536301330545134], [Timestamp('2019-06-13 00:00:00'), 2.0, 0.0, 0.5130835250003853, 0.19619474959227964, 0.29072172540733476, 0.5355199607229433], [Timestamp('2019-06-14 00:00:00'), 2.0, 1.0, 0.06642101909226834, 0.6717737060754381, 0.26180527483229915, 0.5271586037966932], [Timestamp('2019-06-17 00:00:00'), 2.0, 2.0, 0.11972372208228198, 0.323494774451732, 0.5567815034659909, 0.5269123053578157], [Timestamp('2019-06-18 00:00:00'), 2.0, 1.0, 0.14659116079387438, 0.47473898681170795, 0.3786698523944168, 0.5212505335040546], [Timestamp('2019-06-19 00:00:00'), 0.0, 2.0, 0.12895814492643048, 0.3591375390110564, 0.5119043160625111, 0.5202731725123962], [Timestamp('2019-06-21 00:00:00'), 0.0, 2.0, 0.26917684186527935, 0.21389780160599717, 0.5169253565287263, 0.523029947077918], [Timestamp('2019-06-24 00:00:00'), 0.0, 1.0, 0.24232169915452756, 0.4268776710754645, 0.3308006297700123, 0.520460763138949], [Timestamp('2019-06-25 00:00:00'), 0.0, 2.0, 0.17140274907335454, 0.2647977583044702, 0.5637994926221721, 0.5184401582368184], [Timestamp('2019-06-26 00:00:00'), 0.0, 2.0, 0.1947271295978154, 0.20313101627966262, 0.6021418541225286, 0.5154050675945924], [Timestamp('2019-06-27 00:00:00'), 1.0, 2.0, 0.3132636192957651, 0.1107361860743066, 0.5760001946299239, 0.5167178099800459], [Timestamp('2019-06-28 00:00:00'), 1.0, 2.0, 0.10537017407216769, 0.4176610804523307, 0.4769687454754967, 0.5203230128927382], [Timestamp('2019-07-01 00:00:00'), 2.0, 2.0, 0.2151209584292421, 0.15205643034245234, 0.632822611228301, 0.5333382325953716], [Timestamp('2019-07-02 00:00:00'), 2.0, 2.0, 0.338547705562307, 0.11345085211865157, 0.5480014423190351, 0.5331188932638641], [Timestamp('2019-07-03 00:00:00'), 2.0, 1.0, 0.2836156624845022, 0.4086691286784423, 0.30771520883705655, 0.5343292282124742], [Timestamp('2019-07-04 00:00:00'), 2.0, 2.0, 0.3172635875838655, 0.2509395347053511, 0.43179687771078035, 0.5356903006931966], [Timestamp('2019-07-05 00:00:00'), 2.0, 0.0, 0.4828144299132259, 0.19572549543605364, 0.3214600746507214, 0.5357316038058167], [Timestamp('2019-07-08 00:00:00'), 1.0, 0.0, 0.6266520304182616, 0.03835131413109358, 0.3349966554506438, 0.5342595057127929], [Timestamp('2019-07-10 00:00:00'), 0.0, 1.0, 0.31893185333956386, 0.35264714353365745, 0.3284210031267843, 0.5241571839482276], [Timestamp('2019-07-11 00:00:00'), 0.0, 2.0, 0.4136189017071756, 0.06287224382512499, 0.5235088544677011, 0.5247497942174876], [Timestamp('2019-07-12 00:00:00'), 0.0, 1.0, 0.06880959217608286, 0.667493636812882, 0.26369677101103806, 0.5244718822438771], [Timestamp('2019-07-15 00:00:00'), 0.0, 2.0, 0.16690328456905854, 0.24105792214474317, 0.5920387932861932, 0.5259228378326662], [Timestamp('2019-07-16 00:00:00'), 0.0, 2.0, 0.16328512446000262, 0.1031256221823654, 0.733589253357632, 0.5198159764497284], [Timestamp('2019-07-17 00:00:00'), 0.0, 2.0, 0.07958469073940821, 0.3063196498175319, 0.6140956594430598, 0.5235529611396462], [Timestamp('2019-07-18 00:00:00'), 0.0, 2.0, 0.25576976029611626, 0.08280790589462847, 0.6614223338092493, 0.5204373878061553], [Timestamp('2019-07-19 00:00:00'), 0.0, 2.0, 0.18294169131047497, 0.25541878875774043, 0.5616395199317781, 0.5229638712648421], [Timestamp('2019-07-22 00:00:00'), 0.0, 2.0, 0.26932522908038425, 0.13674914627831694, 0.5939256246413034, 0.5180174913184622], [Timestamp('2019-07-23 00:00:00'), 0.0, 0.0, 0.4072473765218896, 0.2506415791007379, 0.34211104437736883, 0.5163452883039773], 
[Timestamp('2019-07-24 00:00:00'), 0.0, 0.0, 0.4259307054342894, 0.3629428136703221, 0.2111264808953878, 0.5213732338046394], [Timestamp('2019-07-25 00:00:00'), 0.0, 0.0, 0.5469191536706403, 0.0903915329162401, 0.3626893134131148, 0.5233360993555168], [Timestamp('2019-07-26 00:00:00'), 2.0, 0.0, 0.5936284316531879, 0.06705521151510492, 0.3393163568317023, 0.5221524605697105], [Timestamp('2019-07-29 00:00:00'), 0.0, 0.0, 0.68961545259774, 0.08834824039249478, 0.22203630700976554, 0.5169279878708576], [Timestamp('2019-07-30 00:00:00'), 0.0, 2.0, 0.3145538834344223, 0.2852471644885364, 0.4001989520770397, 0.5139170365987479], [Timestamp('2019-07-31 00:00:00'), 0.0, 2.0, 0.28418583215349963, 0.18146146928374332, 0.5343526985627539, 0.5162670054241228], [Timestamp('2019-08-01 00:00:00'), 1.0, 2.0, 0.24315537565100098, 0.32541839128402444, 0.4314262330649775, 0.5164739936350077], [Timestamp('2019-08-02 00:00:00'), 1.0, 2.0, 0.3484032632192795, 0.06143328305275041, 0.5901634537279697, 0.5210532790069318], [Timestamp('2019-08-05 00:00:00'), 1.0, 0.0, 0.6135710704612938, 0.057155453531377416, 0.32927347600732465, 0.5206040599009962], [Timestamp('2019-08-06 00:00:00'), 1.0, 0.0, 0.5705845082241802, 0.14121648586167335, 0.2881990059141408, 0.5159355813498827], [Timestamp('2019-08-07 00:00:00'), 0.0, 0.0, 0.5554717183853983, 0.12419515204165525, 0.32033312957295074, 0.5122167996710986], [Timestamp('2019-08-08 00:00:00'), 0.0, 0.0, 0.8512285576208191, 0.054700453955227275, 0.09407098842395145, 0.5093086298682342], [Timestamp('2019-08-09 00:00:00'), 0.0, 0.0, 0.663492661732475, 0.1441614125910122, 0.1923459256765173, 0.5138159260852556], [Timestamp('2019-08-12 00:00:00'), 0.0, 0.0, 0.6974999255725111, 0.07891309398696425, 0.22358698044052266, 0.5119652516956236], [Timestamp('2019-08-13 00:00:00'), 0.0, 0.0, 0.4154831759733138, 0.4020294476312721, 0.1824873763954076, 0.5103371659725404], [Timestamp('2019-08-14 00:00:00'), 2.0, 0.0, 0.5425599492456644, 0.08525122528426567, 0.3721888254700639, 0.5146640104122902], [Timestamp('2019-08-15 00:00:00'), 2.0, 0.0, 0.48907991339970863, 0.1015318435218929, 0.40938824307839156, 0.5190686252743795], [Timestamp('2019-08-16 00:00:00'), 2.0, 2.0, 0.31195283485683406, 0.22202700074079326, 0.4660201644023744, 0.5161882613216112], [Timestamp('2019-08-19 00:00:00'), 1.0, 0.0, 0.6247338202853281, 0.014584604285563064, 0.36068157542911056, 0.5167558475507553], [Timestamp('2019-08-20 00:00:00'), 2.0, 0.0, 0.5023185453609401, 0.1725699350497571, 0.3251115195893032, 0.5214888864510389], [Timestamp('2019-08-21 00:00:00'), 0.0, 0.0, 0.5456205638114636, 0.19672320785723582, 0.2576562283313067, 0.5181503831876376], [Timestamp('2019-08-22 00:00:00'), 2.0, 0.0, 0.45766479518449815, 0.11885538909729942, 0.42347981571820426, 0.5218912051245135], [Timestamp('2019-08-23 00:00:00'), 2.0, 2.0, 0.4271327274498173, 0.13470901002213842, 0.43815826252803897, 0.5246033906507569], [Timestamp('2019-08-26 00:00:00'), 2.0, 0.0, 0.5036357345360328, 0.0811246491818372, 0.4152396162821288, 0.5258599006476765], [Timestamp('2019-08-27 00:00:00'), 2.0, 0.0, 0.457190866000249, 0.13291183655778316, 0.40989729744196635, 0.5278565906649398], [Timestamp('2019-08-28 00:00:00'), 2.0, 0.0, 0.4431611859606078, 0.21683134601925802, 0.34000746802013837, 0.5280753364441739], [Timestamp('2019-08-29 00:00:00'), 2.0, 0.0, 0.39213678611932107, 0.3002287384321261, 0.30763447544855077, 0.5279152482216324], [Timestamp('2019-08-30 00:00:00'), 2.0, 2.0, 0.34460060283481, 0.11616085435559004, 0.5392385428095932, 
0.5291431272339078], [Timestamp('2019-09-02 00:00:00'), 2.0, 2.0, 0.26995076407972685, 0.16383716217663805, 0.566212073743629, 0.5241163878932696], [Timestamp('2019-09-03 00:00:00'), 2.0, 2.0, 0.37151421354519226, 0.21409387057109114, 0.4143919158837127, 0.5246364644214107], [Timestamp('2019-09-04 00:00:00'), 2.0, 0.0, 0.44972144707548173, 0.2352292173880052, 0.3150493355365163, 0.520680375641782], [Timestamp('2019-09-05 00:00:00'), 2.0, 0.0, 0.511400341994909, 0.10527168518484209, 0.38332797282024933, 0.5184888354593824], [Timestamp('2019-09-06 00:00:00'), 2.0, 0.0, 0.5393073622911994, 0.06734724272077328, 0.3933453949880309, 0.5262415619209363], [Timestamp('2019-09-09 00:00:00'), 2.0, 0.0, 0.44353411784847896, 0.1969281802985697, 0.3595377018529557, 0.521979373232006], [Timestamp('2019-09-10 00:00:00'), 2.0, 2.0, 0.23317857146929621, 0.19770799951845613, 0.5691134290122413, 0.5203261206248064], [Timestamp('2019-09-11 00:00:00'), 2.0, 2.0, 0.20787579435932024, 0.13248576238675386, 0.6596384432539268, 0.5230646347038125], [Timestamp('2019-09-12 00:00:00'), 2.0, 2.0, 0.083453678192208, 0.24265066877025745, 0.6738956530375289, 0.5158749045184793], [Timestamp('2019-09-13 00:00:00'), 1.0, 2.0, 0.103690175307543, 0.1318954702101281, 0.7644143544823238, 0.521596875587904], [Timestamp('2019-09-16 00:00:00'), 0.0, 2.0, 0.13095267912130676, 0.37546299108885944, 0.49358432978983335, 0.5155082526846416], [Timestamp('2019-09-17 00:00:00'), 0.0, 2.0, 0.16311765649069596, 0.3578593511954431, 0.47902299231385803, 0.5123926517106945], [Timestamp('2019-09-18 00:00:00'), 1.0, 0.0, 0.5576581375329925, 0.08146211878746051, 0.3608797436795399, 0.5087814655904374], [Timestamp('2019-09-19 00:00:00'), 2.0, 2.0, 0.35161862896732127, 0.14136890645532718, 0.5070124645773479, 0.5071823711394264], [Timestamp('2019-09-20 00:00:00'), 2.0, 0.0, 0.5266561387691244, 0.1257135184215279, 0.3476303428093411, 0.5121616478759337], [Timestamp('2019-09-23 00:00:00'), 1.0, 0.0, 0.5766274147734457, 0.15873303485651488, 0.26463955037004244, 0.5125744361996908], [Timestamp('2019-09-24 00:00:00'), 2.0, 0.0, 0.5560037889053054, 0.03236824509414363, 0.41162796600055523, 0.5049340426122503], [Timestamp('2019-09-25 00:00:00'), 0.0, 0.0, 0.6040048532281703, 0.09011785954075556, 0.30587728723107777, 0.5027107313692679], [Timestamp('2019-09-26 00:00:00'), 0.0, 0.0, 0.4962857069150034, 0.10111718890351304, 0.40259710418148864, 0.5036204286574426], [Timestamp('2019-09-27 00:00:00'), 0.0, 2.0, 0.2520676222328436, 0.1435625620979655, 0.6043698156691861, 0.5003643537898625], [Timestamp('2019-09-30 00:00:00'), 0.0, 2.0, 0.2687112627962931, 0.23849021224175454, 0.49279852496195814, 0.5016413626169723], [Timestamp('2019-10-01 00:00:00'), 0.0, 2.0, 0.1630270554429342, 0.10491832491594388, 0.7320546196411201, 0.49914401898038824], [Timestamp('2019-10-02 00:00:00'), 1.0, 2.0, 0.2358376521183358, 0.06778772060405909, 0.6963746272776045, 0.49784692748453613], [Timestamp('2019-10-03 00:00:00'), 1.0, 2.0, 0.1733603429827472, 0.23500049004780857, 0.5916391669694474, 0.5019603119645436], [Timestamp('2019-10-04 00:00:00'), 2.0, 0.0, 0.5701176241005722, 0.1012847306519318, 0.32859764524749796, 0.49933578953890656], [Timestamp('2019-10-07 00:00:00'), 2.0, 0.0, 0.54588431097492, 0.1485232589087889, 0.3055924301162872, 0.5035515502396891], [Timestamp('2019-10-08 00:00:00'), 2.0, 2.0, 0.38842048047350064, 0.173719319952921, 0.4378601995735742, 0.5035930265198895], [Timestamp('2019-10-09 00:00:00'), 2.0, 2.0, 0.25137395498415294, 0.20862493862320594, 
0.5400011063926378, 0.5017012667832019], [Timestamp('2019-10-10 00:00:00'), 2.0, 2.0, 0.36131546577794227, 0.21344997708448718, 0.4252345571375765, 0.5052719195474031], [Timestamp('2019-10-11 00:00:00'), 2.0, 2.0, 0.23230306315320137, 0.16809504893029845, 0.5996018879164953, 0.502030318385191], [Timestamp('2019-10-14 00:00:00'), 2.0, 2.0, 0.2872378678241576, 0.08260612553261154, 0.6301560066432337, 0.5045302227306254], [Timestamp('2019-10-15 00:00:00'), 2.0, 1.0, 0.32613651451861514, 0.34136245460622505, 0.3325010308751567, 0.5017775517348685], [Timestamp('2019-10-16 00:00:00'), 2.0, 1.0, 0.20403309640551817, 0.448460020980465, 0.34750688261401597, 0.5011025562286554], [Timestamp('2019-10-17 00:00:00'), 2.0, 2.0, 0.4462237616627763, 0.0596517559843889, 0.4941244823528401, 0.4979634544595519], [Timestamp('2019-10-18 00:00:00'), 2.0, 0.0, 0.4562337354826612, 0.18138492576000972, 0.3623813387573274, 0.49777076983746804], [Timestamp('2019-10-21 00:00:00'), 2.0, 0.0, 0.4141270322561706, 0.22735131056849, 0.35852165717534495, 0.4986261950550866], [Timestamp('2019-10-22 00:00:00'), 2.0, 0.0, 0.5902920972177976, 0.20010844495072802, 0.2095994578314689, 0.49671380064515375], [Timestamp('2019-10-23 00:00:00'), 2.0, 0.0, 0.4646399488188144, 0.2765317629485689, 0.2588282882326146, 0.4971723078898996], [Timestamp('2019-10-24 00:00:00'), 2.0, 0.0, 0.6538406612702893, 0.05954910620394731, 0.2866102325257566, 0.49999389439637626], [Timestamp('2019-10-25 00:00:00'), 2.0, 0.0, 0.4564927312779061, 0.36390796052053026, 0.1795993082015653, 0.5001745016003949], [Timestamp('2019-10-28 00:00:00'), 2.0, 0.0, 0.4961654577279524, 0.27214721662914937, 0.2316873256429038, 0.4986467050625958], [Timestamp('2019-10-29 00:00:00'), 1.0, 2.0, 0.3374687946485176, 0.26107962270201235, 0.4014515826494742, 0.4993436494531028], [Timestamp('2019-10-30 00:00:00'), 0.0, 0.0, 0.5988049695120126, 0.05985213448021109, 0.34134289600776957, 0.4983975601350321], [Timestamp('2019-10-31 00:00:00'), 2.0, 0.0, 0.7595341882752545, 0.09678479328805496, 0.14368101843669587, 0.49803666411088326], [Timestamp('2019-11-01 00:00:00'), 0.0, 0.0, 0.3770385012195124, 0.35227787747728034, 0.2706836213032089, 0.5001797872409474], [Timestamp('2019-11-04 00:00:00'), 1.0, 0.0, 0.580910106486714, 0.11936966453453637, 0.29972022897874523, 0.5024270677307935], [Timestamp('2019-11-05 00:00:00'), 2.0, 0.0, 0.49247418341822224, 0.18713635671067425, 0.32038945987110345, 0.49960180337638666], [Timestamp('2019-11-06 00:00:00'), 2.0, 0.0, 0.5838097563392786, 0.059741022701652984, 0.3564492209590686, 0.49910024528742564], [Timestamp('2019-11-07 00:00:00'), 0.0, 0.0, 0.5023164499326508, 0.17066622436290269, 0.32701732570444353, 0.4971836524919359], [Timestamp('2019-11-08 00:00:00'), 0.0, 2.0, 0.3468428746029639, 0.17234260956792422, 0.4808145158291147, 0.49851675724637684], [Timestamp('2019-11-11 00:00:00'), 0.0, 0.0, 0.48860464901842504, 0.17933347605611352, 0.3320618749254594, 0.5008718649826177], [Timestamp('2019-11-12 00:00:00'), 1.0, 2.0, 0.3447574525600033, 0.188818084632258, 0.4664244628077349, 0.4985296972049689], [Timestamp('2019-11-13 00:00:00'), 1.0, 0.0, 0.5442729385622926, 0.14291747177782826, 0.31280958965988614, 0.4975903510298422], [Timestamp('2019-11-14 00:00:00'), 2.0, 2.0, 0.37928823329099753, 0.19541948768358858, 0.4252922790254141, 0.4974063401373184], [Timestamp('2019-11-18 00:00:00'), 1.0, 0.0, 0.5664308364492707, 0.11787611547356194, 0.31569304807716325, 0.5004576038138205], [Timestamp('2019-11-19 00:00:00'), 2.0, 0.0, 0.5364033505309985, 
0.04319410974846472, 0.42040253972054376, 0.4969165387334982], [Timestamp('2019-11-21 00:00:00'), 0.0, 0.0, 0.6344190982447175, 0.11256334091284806, 0.2530175608424392, 0.4926947051153859], [Timestamp('2019-11-22 00:00:00'), 0.0, 0.0, 0.420624939859805, 0.20223748671100888, 0.37713757342918497, 0.4929745203600067], [Timestamp('2019-11-25 00:00:00'), 0.0, 2.0, 0.34483655164085475, 0.1419267823532968, 0.5132366660058459, 0.4932446020731101], [Timestamp('2019-11-26 00:00:00'), 1.0, 2.0, 0.17956968355935757, 0.18376199010388386, 0.6366683263367583, 0.49145375805139707], [Timestamp('2019-11-27 00:00:00'), 2.0, 2.0, 0.3030226517810297, 0.1387892416913313, 0.558188106527646, 0.4882226676962314], [Timestamp('2019-11-28 00:00:00'), 2.0, 2.0, 0.23610124738200916, 0.29678937304020936, 0.46710937957777526, 0.49138146072469313], [Timestamp('2019-11-29 00:00:00'), 2.0, 2.0, 0.3578998605922359, 0.23154350978263066, 0.41055662962513595, 0.48494047827837167], [Timestamp('2019-12-02 00:00:00'), 2.0, 1.0, 0.29404305932297364, 0.4437870520209005, 0.26216988865612345, 0.4826944769873431], [Timestamp('2019-12-03 00:00:00'), 2.0, 0.0, 0.4407054997438438, 0.24509806584463945, 0.3141964344115213, 0.4887909758497993], [Timestamp('2019-12-04 00:00:00'), 2.0, 0.0, 0.7290552786884646, 0.07390236923029873, 0.19704235208123258, 0.48960930036754585], [Timestamp('2019-12-05 00:00:00'), 2.0, 1.0, 0.31687417699543247, 0.35546415875539816, 0.327661664249163, 0.49003986209868566], [Timestamp('2019-12-06 00:00:00'), 0.0, 0.0, 0.4451686921069185, 0.1798888425293396, 0.3749424653637487, 0.48702391808635453], [Timestamp('2019-12-09 00:00:00'), 0.0, 2.0, 0.4081438224710561, 0.15598693656357168, 0.4358692409653775, 0.4866650167950793], [Timestamp('2019-12-10 00:00:00'), 0.0, 0.0, 0.4320444186351752, 0.17133491157462277, 0.3966206697902083, 0.4856567759905503], [Timestamp('2019-12-11 00:00:00'), 1.0, 0.0, 0.37406103781437494, 0.37198905194059206, 0.25394991024503455, 0.48746158966372], [Timestamp('2019-12-12 00:00:00'), 0.0, 1.0, 0.2593496670131482, 0.5015611822811622, 0.23908915070568879, 0.49103727457620067], [Timestamp('2019-12-13 00:00:00'), 2.0, 2.0, 0.34248794997280146, 0.16621350607891264, 0.4912985439482822, 0.4896399190759464], [Timestamp('2019-12-16 00:00:00'), 2.0, 2.0, 0.25346907845914174, 0.14775028724098907, 0.5987806342998758, 0.4932376643491831], [Timestamp('2019-12-17 00:00:00'), 2.0, 0.0, 0.4112669126036269, 0.23735632906661597, 0.3513767583297538, 0.4940544962136842], [Timestamp('2019-12-18 00:00:00'), 2.0, 1.0, 0.2595783180104639, 0.5676432205671414, 0.1727784614223964, 0.4907002224851827], [Timestamp('2019-12-19 00:00:00'), 1.0, 0.0, 0.48679035416685323, 0.16787943915865233, 0.3453302066744889, 0.4850574130361365], [Timestamp('2019-12-20 00:00:00'), 2.0, 0.0, 0.5563160381446883, 0.13963527981670779, 0.30404868203860186, 0.48269765909998014], [Timestamp('2019-12-23 00:00:00'), 2.0, 1.0, 0.3317846182816131, 0.4517787603113626, 0.2164366214070307, 0.48239846216252946], [Timestamp('2019-12-26 00:00:00'), 2.0, 0.0, 0.5885117370812983, 0.1765304072551679, 0.23495785566353772, 0.48519643028541615], [Timestamp('2019-12-27 00:00:00'), 1.0, 0.0, 0.4345375491733254, 0.14143600514397076, 0.42402644568269887, 0.48197800748574443], [Timestamp('2019-12-30 00:00:00'), 2.0, 2.0, 0.2072697719399248, 0.13160472265194445, 0.6611255054081355, 0.48403283293848337], [Timestamp('2020-01-02 00:00:00'), 0.0, 0.0, 0.4611216725462308, 0.21513706902113852, 0.323741258432631, 0.4848312232690099], [Timestamp('2020-01-03 00:00:00'), 1.0, 2.0, 
0.26294915648159184, 0.17607113723292156, 0.5609797062854928, 0.4860017009789237], [Timestamp('2020-01-06 00:00:00'), 0.0, 0.0, 0.4507342614531025, 0.3307899658161299, 0.2184757727307617, 0.48928246921238755], [Timestamp('2020-01-07 00:00:00'), 0.0, 2.0, 0.28786884207861235, 0.19278372546931677, 0.5193474324520753, 0.49070021262922864], [Timestamp('2020-01-08 00:00:00'), 0.0, 2.0, 0.22069728853048892, 0.09037288049932081, 0.6889298309701875, 0.49222898633432094], [Timestamp('2020-01-09 00:00:00'), 0.0, 2.0, 0.23780120701202742, 0.1507179755187774, 0.6114808174691958, 0.4898562170451147], [Timestamp('2020-01-10 00:00:00'), 0.0, 2.0, 0.2588117814824453, 0.14067691986059438, 0.6005112986569605, 0.4893766956075094], [Timestamp('2020-01-13 00:00:00'), 0.0, 2.0, 0.20055195103492476, 0.15779546519801463, 0.6416525837670582, 0.4893646327203333], [Timestamp('2020-01-14 00:00:00'), 0.0, 2.0, 0.1913432941217988, 0.24631086540103192, 0.5623458404771687, 0.48454971688523263], [Timestamp('2020-01-15 00:00:00'), 0.0, 2.0, 0.39172256183020165, 0.18682822795377377, 0.42144921021602294, 0.48506193957846416], [Timestamp('2020-01-16 00:00:00'), 1.0, 2.0, 0.3354977478406394, 0.15745709998268476, 0.5070451521766692, 0.48143433946056335], [Timestamp('2020-01-17 00:00:00'), 0.0, 0.0, 0.507065755886793, 0.20007492967729057, 0.2928593144359104, 0.477668876956826], [Timestamp('2020-01-20 00:00:00'), 0.0, 0.0, 0.48155073933809567, 0.10868120140484909, 0.4097680592570606, 0.47682915948720517], [Timestamp('2020-01-21 00:00:00'), 0.0, 0.0, 0.496082546468889, 0.11760349086044333, 0.3863139626706661, 0.4796434263686571], [Timestamp('2020-01-22 00:00:00'), 0.0, 0.0, 0.5352056378367575, 0.1771540202501112, 0.28764034191313603, 0.48219311229250855], [Timestamp('2020-01-23 00:00:00'), 0.0, 0.0, 0.655114384929761, 0.11154745315912017, 0.23333816191111878, 0.4859503866006188], [Timestamp('2020-01-24 00:00:00'), 0.0, 0.0, 0.4643663070831807, 0.2021968368184683, 0.33343685609835527, 0.4792726434349057], [Timestamp('2020-01-27 00:00:00'), 1.0, 0.0, 0.4223255188240439, 0.17969910124232452, 0.3979753799336265, 0.4808391370054234], [Timestamp('2020-01-28 00:00:00'), 1.0, 0.0, 0.5233108154369015, 0.11393778318274433, 0.36275140138034817, 0.4912550666021893], [Timestamp('2020-01-29 00:00:00'), 0.0, 0.0, 0.5039863776497209, 0.29217035957450305, 0.20384326277578094, 0.4882427661255639], [Timestamp('2020-01-30 00:00:00'), 2.0, 0.0, 0.49816714048704125, 0.2404990309338677, 0.2613338285790892, 0.4932207119352677], [Timestamp('2020-01-31 00:00:00'), 2.0, 0.0, 0.5836547500658973, 0.12777004172563533, 0.28857520820846655, 0.4911567726169859], [Timestamp('2020-02-03 00:00:00'), 2.0, 0.0, 0.6114668603782231, 0.21399464646475405, 0.1745384931570181, 0.4907327476966386], [Timestamp('2020-02-04 00:00:00'), 2.0, 0.0, 0.5425154932671705, 0.3133162885727072, 0.14416821816011594, 0.48837794970518084], [Timestamp('2020-02-05 00:00:00'), 2.0, 0.0, 0.6703342155153731, 0.059455773803794305, 0.270210010680834, 0.49114025285817436], [Timestamp('2020-02-06 00:00:00'), 2.0, 0.0, 0.49317265789205084, 0.33549330998104077, 0.17133403212690623, 0.4916251166373296], [Timestamp('2020-02-07 00:00:00'), 2.0, 0.0, 0.4603557300163048, 0.23684323774507704, 0.3028010322386111, 0.4900821062543939], [Timestamp('2020-02-10 00:00:00'), 2.0, 0.0, 0.49659600732376474, 0.35199648675299844, 0.1514075059232376, 0.48452243612853607], [Timestamp('2020-02-11 00:00:00'), 2.0, 2.0, 0.38970503781345855, 0.1645209423769265, 0.445774019809613, 0.4806732441174996], [Timestamp('2020-02-12 
00:00:00'), 2.0, 1.0, 0.18989005984819687, 0.6021549402970237, 0.2079549998547838, 0.48247622810415763], [Timestamp('2020-02-13 00:00:00'), 1.0, 2.0, 0.13448970239881858, 0.3817278171477955, 0.48378248045338024, 0.4842539563036457], [Timestamp('2020-02-14 00:00:00'), 0.0, 2.0, 0.3101855740813094, 0.13000550628484872, 0.5598089196338398, 0.48678881260678103], [Timestamp('2020-02-17 00:00:00'), 0.0, 2.0, 0.25938761207055716, 0.19451388549179502, 0.5460985024376435, 0.48507723975385897], [Timestamp('2020-02-18 00:00:00'), 0.0, 2.0, 0.3891129147890261, 0.20723348372259626, 0.4036536014883727, 0.4843290178008785], [Timestamp('2020-02-19 00:00:00'), 0.0, 1.0, 0.241425311242776, 0.4863875167018748, 0.2721871720553425, 0.48515427118855997], [Timestamp('2020-02-20 00:00:00'), 0.0, 2.0, 0.21640029374720515, 0.24110752202771352, 0.5424921842250748, 0.4843977122961591], [Timestamp('2020-02-21 00:00:00'), 0.0, 2.0, 0.20595545043301342, 0.2801871048856655, 0.5138574446813277, 0.4789259784085431], [Timestamp('2020-02-27 00:00:00'), 1.0, 1.0, 0.21625518587443734, 0.41654405289164104, 0.36720076123392614, 0.47912176146146895], [Timestamp('2020-02-28 00:00:00'), 0.0, 2.0, 0.35185515352306573, 0.2601464591074318, 0.3879983873695058, 0.4794401475949095], [Timestamp('2020-03-02 00:00:00'), 0.0, 0.0, 0.4143727842547549, 0.2740946928963141, 0.31153252284893324, 0.4843266852195424], [Timestamp('2020-03-03 00:00:00'), 0.0, 0.0, 0.5337998859467955, 0.17665813931727947, 0.28954197473592036, 0.48355660510405557], [Timestamp('2020-03-04 00:00:00'), 0.0, 0.0, 0.5896244032365987, 0.08180964730214695, 0.32856594946124856, 0.48340231701309183], [Timestamp('2020-03-05 00:00:00'), 0.0, 2.0, 0.34867250671192557, 0.1999064149827741, 0.4514210783053047, 0.4812596765658888], [Timestamp('2020-03-06 00:00:00'), 0.0, 1.0, 0.30854145729375776, 0.4022675466871656, 0.2891909960190775, 0.4846406770062468], [Timestamp('2020-03-09 00:00:00'), 0.0, 2.0, 0.422236669129518, 0.09844868070284808, 0.4793146501676393, 0.4852913703360133], [Timestamp('2020-03-10 00:00:00'), 0.0, 0.0, 0.8123429152815316, 0.0595428829234828, 0.12811420179498273, 0.4822063951278836], [Timestamp('2020-03-11 00:00:00'), 0.0, 0.0, 0.7005033607243983, 0.1274192473055659, 0.1720773919700422, 0.4810977774877994], [Timestamp('2020-03-12 00:00:00'), 2.0, 2.0, 0.3973805390867319, 0.19519629668680205, 0.4074231642264646, 0.4827662251769394], [Timestamp('2020-03-13 00:00:00'), 0.0, 1.0, 0.30927415754928106, 0.36174167815628716, 0.3289841642944253, 0.48130787037037037], [Timestamp('2020-03-16 00:00:00'), 0.0, 2.0, 0.33590328713649525, 0.1774096063840225, 0.4866871064794806, 0.4832768522793338], [Timestamp('2020-03-17 00:00:00'), 2.0, 2.0, 0.33406575407120065, 0.25657474038448463, 0.40935950554430894, 0.47894072709910646], [Timestamp('2020-03-18 00:00:00'), 2.0, 2.0, 0.21413023208441617, 0.29898284955048227, 0.4868869183651037, 0.47838311091764446], [Timestamp('2020-03-19 00:00:00'), 2.0, 0.0, 0.4889233829516588, 0.23643433325196683, 0.2746422837963765, 0.47722608441554026], [Timestamp('2020-03-20 00:00:00'), 2.0, 0.0, 0.5790292355175924, 0.16301636571912329, 0.2579543987632852, 0.4786678944137508], [Timestamp('2020-03-23 00:00:00'), 2.0, 0.0, 0.48614362920573434, 0.2760254793932465, 0.23783089140101618, 0.47611648250460403], [Timestamp('2020-03-24 00:00:00'), 2.0, 0.0, 0.5316996249176722, 0.16692007236113465, 0.3013803027211995, 0.4755574228715765], [Timestamp('2020-03-25 00:00:00'), 1.0, 0.0, 0.4596300824436383, 0.21925660074998632, 0.321113316806378, 0.4681030822691734], 
[Timestamp('2020-03-26 00:00:00'), 2.0, 2.0, 0.19338861290099915, 0.15159194750822777, 0.6550194395907778, 0.4776751151542363], [Timestamp('2020-03-27 00:00:00'), 2.0, 2.0, 0.16129972214247024, 0.06301193301024492, 0.7756883448472911, 0.47101845355386746], [Timestamp('2020-03-30 00:00:00'), 2.0, 2.0, 0.18496518939645745, 0.33373812037666634, 0.48129669022687027, 0.47366171451722877], [Timestamp('2020-03-31 00:00:00'), 2.0, 2.0, 0.2515882475536992, 0.24863818372733937, 0.49977356871896794, 0.4753946628029065], [Timestamp('2020-04-01 00:00:00'), 2.0, 1.0, 0.14925906738188777, 0.44919552448396305, 0.401545408134146, 0.4740709160213208], [Timestamp('2020-04-02 00:00:00'), 2.0, 2.0, 0.32157764349796764, 0.17148942938877887, 0.5069329271132506, 0.4772992061824603], [Timestamp('2020-04-03 00:00:00'), 2.0, 2.0, 0.1501183297385211, 0.25238332702413996, 0.5974983432373349, 0.48284288623601174], [Timestamp('2020-04-06 00:00:00'), 2.0, 1.0, 0.10991386649898534, 0.5299434589990183, 0.36014267450199705, 0.4772687111580545], [Timestamp('2020-04-07 00:00:00'), 1.0, 2.0, 0.08751911116290197, 0.25297278336203766, 0.659508105475056, 0.47700463520651093], [Timestamp('2020-04-08 00:00:00'), 0.0, 1.0, 0.10276863455533128, 0.6225078835823574, 0.2747234818623158, 0.4739419631236054], [Timestamp('2020-04-09 00:00:00'), 0.0, 2.0, 0.3171575658145588, 0.08767793138393347, 0.5951645028015136, 0.47344625982630584], [Timestamp('2020-04-13 00:00:00'), 0.0, 2.0, 0.17770921736785383, 0.23481678248147064, 0.5874740001506744, 0.4748211475648299], [Timestamp('2020-04-14 00:00:00'), 1.0, 2.0, 0.13422225416674952, 0.1401450025825639, 0.725632743250681, 0.46774031566573976], [Timestamp('2020-04-15 00:00:00'), 2.0, 2.0, 0.1552009978832641, 0.21289917974607106, 0.6318998223706697, 0.4741654463722935], [Timestamp('2020-04-16 00:00:00'), 2.0, 2.0, 0.2902603412141507, 0.15778329714570566, 0.5519563616401476, 0.4764847876214602], [Timestamp('2020-04-17 00:00:00'), 2.0, 0.0, 0.5818385942016016, 0.15729851771963502, 0.26086288807876834, 0.477286308566866], [Timestamp('2020-04-20 00:00:00'), 2.0, 2.0, 0.3734672249856613, 0.1822434137600838, 0.44428936125425494, 0.4774865074741675], [Timestamp('2020-04-22 00:00:00'), 2.0, 1.0, 0.19039933285654098, 0.46526166860919577, 0.3443389985342674, 0.4733493448652344], [Timestamp('2020-04-23 00:00:00'), 2.0, 2.0, 0.3768288379172109, 0.1564595458672466, 0.4667116162155385, 0.4727956979391165], [Timestamp('2020-04-24 00:00:00'), 2.0, 0.0, 0.5084730754009371, 0.050127058848659756, 0.4413998657503981, 0.47581551913874404], [Timestamp('2020-04-27 00:00:00'), 2.0, 2.0, 0.38052952549218005, 0.22761550104951764, 0.3918549734583084, 0.47537261885700416], [Timestamp('2020-04-28 00:00:00'), 1.0, 2.0, 0.2748661815176742, 0.06533684787223412, 0.6597969706100951, 0.4724273901152983], [Timestamp('2020-04-29 00:00:00'), 0.0, 0.0, 0.6114520346710339, 0.03507357923180326, 0.3534743860971591, 0.4677789201920241], [Timestamp('2020-04-30 00:00:00'), 2.0, 2.0, 0.26005608718561085, 0.17578291560808096, 0.564160997206304, 0.46495481034014374], [Timestamp('2020-05-04 00:00:00'), 2.0, 2.0, 0.24623103916606132, 0.05048997424871046, 0.7032789865852314, 0.4636500549859896], [Timestamp('2020-05-05 00:00:00'), 2.0, 2.0, 0.3516606213988584, 0.0862680120078418, 0.5620713665933026, 0.466260970359331], [Timestamp('2020-05-06 00:00:00'), 2.0, 2.0, 0.0776853568847724, 0.4147727220091719, 0.5075419211060522, 0.46211088104313114], [Timestamp('2020-05-07 00:00:00'), 1.0, 2.0, 0.14385246901682067, 0.1147070382876073, 0.7414404926955692, 
0.4647172647520212], [Timestamp('2020-05-08 00:00:00'), 0.0, 2.0, 0.16993373141549603, 0.030993252307521032, 0.799073016276984, 0.4644147684084025], [Timestamp('2020-05-11 00:00:00'), 2.0, 2.0, 0.1677405033271129, 0.06210210677682263, 0.7701573898960632, 0.46413438300114773], [Timestamp('2020-05-12 00:00:00'), 2.0, 2.0, 0.06512239327539614, 0.14415782254247148, 0.7907197841821254, 0.46523168688026795], [Timestamp('2020-05-13 00:00:00'), 2.0, 2.0, 0.0970504254667183, 0.11340569282129491, 0.7895438817119828, 0.46291379425268087], [Timestamp('2020-05-14 00:00:00'), 2.0, 2.0, 0.3388228942433062, 0.05022560003118621, 0.6109515057255036, 0.4665160360472204], [Timestamp('2020-05-15 00:00:00'), 2.0, 2.0, 0.2593225377060632, 0.06660046425949485, 0.6740769980344382, 0.4617954969166786], [Timestamp('2020-05-18 00:00:00'), 2.0, 2.0, 0.31056133398293145, 0.05975774238096956, 0.6296809236361016, 0.4647077138247108], [Timestamp('2020-05-19 00:00:00'), 2.0, 2.0, 0.1791206439880509, 0.363627916822563, 0.4572514391893824, 0.45998893309950245], [Timestamp('2020-05-20 00:00:00'), 2.0, 2.0, 0.36951798878792463, 0.22833826572709887, 0.4021437454849809, 0.46824446358250266], [Timestamp('2020-05-21 00:00:00'), 2.0, 2.0, 0.3255023590654048, 0.028570197381513267, 0.645927443553083, 0.46795112990157994], [Timestamp('2020-05-22 00:00:00'), 2.0, 2.0, 0.21999227810555647, 0.04692279613472389, 0.7330849257597226, 0.46845945571190145], [Timestamp('2020-05-25 00:00:00'), 2.0, 2.0, 0.16112784991281232, 0.27978078177159704, 0.5590913683155934, 0.4670722204580479], [Timestamp('2020-05-26 00:00:00'), 2.0, 2.0, 0.24368686441773305, 0.12309300983760922, 0.6332201257446598, 0.4644963411319207], [Timestamp('2020-05-27 00:00:00'), 2.0, 2.0, 0.44306057661689063, 0.03166719432206989, 0.525272229061039, 0.4634354192997839], [Timestamp('2020-05-28 00:00:00'), 2.0, 2.0, 0.31711913880007186, 0.13015261121971639, 0.5527282499802055, 0.46361271260865483], [Timestamp('2020-05-29 00:00:00'), 2.0, 2.0, 0.33572259231094415, 0.23343128090481352, 0.4308461267842398, 0.4601337036097714], [Timestamp('2020-06-01 00:00:00'), 2.0, 2.0, 0.1338707006190852, 0.41041529301071616, 0.45571400637020226, 0.460550937436826], [Timestamp('2020-06-02 00:00:00'), 2.0, 2.0, 0.2772916716928319, 0.06006768789806393, 0.6626406404090978, 0.4578846129340599], [Timestamp('2020-06-03 00:00:00'), 1.0, 2.0, 0.16898524147570956, 0.15663522764077753, 0.6743795308835115, 0.4609012728633151], [Timestamp('2020-06-04 00:00:00'), 0.0, 2.0, 0.1927817689788034, 0.03343963066107156, 0.7737786003601219, 0.46762372023859555], [Timestamp('2020-06-05 00:00:00'), 0.0, 2.0, 0.2960130342979484, 0.08357759001379342, 0.6204093756882575, 0.46684788637912683], [Timestamp('2020-06-08 00:00:00'), 0.0, 2.0, 0.2557743533419395, 0.16294051409696883, 0.5812851325610876, 0.46650236247120674], [Timestamp('2020-06-09 00:00:00'), 0.0, 2.0, 0.23348954966077537, 0.011472490755541458, 0.7550379595836881, 0.4672929451301741], [Timestamp('2020-06-10 00:00:00'), 2.0, 2.0, 0.1267095352585344, 0.044756261904601524, 0.8285342028368663, 0.4697097701885086], [Timestamp('2020-06-12 00:00:00'), 2.0, 2.0, 0.17387022242489422, 0.11300866747936719, 0.7131211100957354, 0.46898556547102394], [Timestamp('2020-06-15 00:00:00'), 2.0, 1.0, 0.03431126556971246, 0.6173691197846103, 0.34831961464567746, 0.46736229236933263], [Timestamp('2020-06-16 00:00:00'), 2.0, 0.0, 0.4720393405195773, 0.059478906524757534, 0.46848175295566846, 0.4651695983162483], [Timestamp('2020-06-17 00:00:00'), 0.0, 2.0, 0.4169441942782341, 
0.02087791432659149, 0.5621778913951808, 0.4695048065622471], [Timestamp('2020-06-18 00:00:00'), 1.0, 2.0, 0.3541669146004564, 0.06558466608071807, 0.5802484193188252, 0.4698751666736385], [Timestamp('2020-06-19 00:00:00'), 0.0, 2.0, 0.213192246309593, 0.01628158599507608, 0.7705261676953241, 0.46809811955165587], [Timestamp('2020-06-22 00:00:00'), 2.0, 2.0, 0.12302555569725557, 0.04734378243946409, 0.8296306618632848, 0.4656411370242319], [Timestamp('2020-06-23 00:00:00'), 1.0, 2.0, 0.4100437999734783, 0.02656401941520097, 0.5633921806113252, 0.4670315251510196], [Timestamp('2020-06-24 00:00:00'), 2.0, 2.0, 0.1699543578495092, 0.03026695707585346, 0.799778685074637, 0.46403588244949456], [Timestamp('2020-06-25 00:00:00'), 2.0, 2.0, 0.42442978248263213, 0.07477017284046088, 0.5008000446769026, 0.46609128865272287], [Timestamp('2020-06-26 00:00:00'), 2.0, 2.0, 0.2869711039067055, 0.058771556797175925, 0.6542573392961244, 0.46470470180165496], [Timestamp('2020-06-29 00:00:00'), 2.0, 2.0, 0.33135691718703186, 0.0849075002363177, 0.5837355825766442, 0.46454522324164893], [Timestamp('2020-06-30 00:00:00'), 2.0, 2.0, 0.24415312564913583, 0.08989199279875092, 0.6659548815521067, 0.4657056323000181], [Timestamp('2020-07-01 00:00:00'), 2.0, 2.0, 0.16953149568636527, 0.04830972623392875, 0.7821587780797118, 0.46799598409274507], [Timestamp('2020-07-02 00:00:00'), 1.0, 2.0, 0.1501084285729092, 0.06760678752771507, 0.7822847838993741, 0.4621410799721266], [Timestamp('2020-07-03 00:00:00'), 2.0, 2.0, 0.2163690852854039, 0.014801630317631776, 0.7688292843969702, 0.46908348645431425], [Timestamp('2020-07-06 00:00:00'), 0.0, 2.0, 0.4517392026234081, 0.016384509927890346, 0.5318762874486993, 0.471351024118381], [Timestamp('2020-07-07 00:00:00'), 2.0, 0.0, 0.5359970061702775, 0.050977357013078095, 0.41302563681664706, 0.4683465954397685], [Timestamp('2020-07-08 00:00:00'), 2.0, 2.0, 0.3556369503020704, 0.1198991617952852, 0.5244638879026478, 0.4680925344401042], [Timestamp('2020-07-09 00:00:00'), 2.0, 2.0, 0.2269412576225651, 0.049817377501865705, 0.7232413648755717, 0.4681670776302371], [Timestamp('2020-07-10 00:00:00'), 2.0, 2.0, 0.24964453958299412, 0.1747201025692095, 0.5756353578478033, 0.47264110740691473], [Timestamp('2020-07-13 00:00:00'), 2.0, 2.0, 0.24776709915736214, 0.08289291851619751, 0.6693399823264332, 0.47369801510916903], [Timestamp('2020-07-14 00:00:00'), 2.0, 2.0, 0.31961647255507913, 0.0145828392937056, 0.6658006881512128, 0.4694067770881854], [Timestamp('2020-07-15 00:00:00'), 0.0, 2.0, 0.3369512073673437, 0.11883088040502628, 0.544217912227634, 0.46957131766869936], [Timestamp('2020-07-16 00:00:00'), 1.0, 2.0, 0.3077127385880629, 0.11014710017263231, 0.5821401612393101, 0.4722533004603034], [Timestamp('2020-07-17 00:00:00'), 1.0, 0.0, 0.5032533284855704, 0.012644702227459875, 0.4841019692869629, 0.4654322988256288], [Timestamp('2020-07-20 00:00:00'), 2.0, 2.0, 0.36184088258228164, 0.0765306104109076, 0.5616285070068144, 0.462864530339123], [Timestamp('2020-07-21 00:00:00'), 0.0, 0.0, 0.5365205224504271, 0.10576964908044183, 0.3577098284691333, 0.45925259526597983], [Timestamp('2020-07-22 00:00:00'), 1.0, 2.0, 0.2845806460571868, 0.042818040411243354, 0.6726013135315645, 0.46038631030568206], [Timestamp('2020-07-23 00:00:00'), 2.0, 2.0, 0.4804769763223784, 0.023607631269968614, 0.4959153924076521, 0.45902016917603267], [Timestamp('2020-07-24 00:00:00'), 0.0, 0.0, 0.5628403945362297, 0.06423583949748818, 0.37292376596627524, 0.4617303553156205], [Timestamp('2020-07-27 00:00:00'), 0.0, 
[… per-date prediction records omitted: each printed entry has the form [Timestamp, actual class (0/1/2), predicted class (0/1/2), P(class 0), P(class 1), P(class 2), running score]; one series runs 2020-07-28 through 2021-05-25, and a second series begins 2018-01-26 …]
00:00:00'), 2.0, 0.0, 0.7405357820781043, 0.16747402749394216, 0.09199019042795294, 0.5205352759072982], [Timestamp('2019-08-16 00:00:00'), 2.0, 0.0, 0.8440080123429978, 0.12274927919287783, 0.03324270846412101, 0.5216249198483746], [Timestamp('2019-08-19 00:00:00'), 2.0, 0.0, 0.8392356025185901, 0.09363254434549212, 0.0671318531359147, 0.5220097053415192], [Timestamp('2019-08-20 00:00:00'), 2.0, 0.0, 0.8697426427173593, 0.08245180836105964, 0.047805548921578414, 0.5216940654587318], [Timestamp('2019-08-21 00:00:00'), 2.0, 0.0, 0.7832318836902995, 0.18330507705619872, 0.033463039253504384, 0.5215260069336933], [Timestamp('2019-08-22 00:00:00'), 2.0, 0.0, 0.8836594519418824, 0.079701877673087, 0.03663867038502761, 0.522745225871735], [Timestamp('2019-08-23 00:00:00'), 2.0, 0.0, 0.7703083557649691, 0.1872652267344702, 0.0424264175005597, 0.5224248245727324], [Timestamp('2019-08-26 00:00:00'), 2.0, 0.0, 0.8138023213717928, 0.13699337330407707, 0.04920430532412906, 0.5252351687066914], [Timestamp('2019-08-27 00:00:00'), 2.0, 0.0, 0.6343303945050534, 0.30562778935308704, 0.06004181614185634, 0.5239973855830877], [Timestamp('2019-08-28 00:00:00'), 2.0, 0.0, 0.7396083202605387, 0.18311319135912496, 0.07727848838033942, 0.519700021578296], [Timestamp('2019-08-29 00:00:00'), 2.0, 0.0, 0.7962710328103412, 0.15656920920332787, 0.047159757986333005, 0.5193734925956405], [Timestamp('2019-08-30 00:00:00'), 2.0, 0.0, 0.8481105721886764, 0.08699271961209583, 0.0648967081992262, 0.5191744411203024], [Timestamp('2019-09-02 00:00:00'), 2.0, 0.0, 0.8146444388440229, 0.12043671526003542, 0.06491884589593959, 0.5165489179394211], [Timestamp('2019-09-03 00:00:00'), 2.0, 0.0, 0.8271254476301461, 0.09902938795616432, 0.07384516441369209, 0.5165895353317734], [Timestamp('2019-09-04 00:00:00'), 2.0, 0.0, 0.6925244175833402, 0.2622148145614269, 0.04526076785523002, 0.5181984397682659], [Timestamp('2019-09-05 00:00:00'), 2.0, 1.0, 0.41898821734132663, 0.5279962032460483, 0.05301557941262653, 0.5192390937180017], [Timestamp('2019-09-06 00:00:00'), 2.0, 1.0, 0.4011765807254233, 0.5407611110323886, 0.0580623082421879, 0.5160479379663527], [Timestamp('2019-09-09 00:00:00'), 2.0, 0.0, 0.7248853902855343, 0.19929097514707197, 0.07582363456739297, 0.5139944372154374], [Timestamp('2019-09-10 00:00:00'), 1.0, 0.0, 0.723055028879897, 0.22224114339064302, 0.054703827729457054, 0.5159717825384376], [Timestamp('2019-09-11 00:00:00'), 2.0, 0.0, 0.6934063584112521, 0.237318887903665, 0.06927475368508548, 0.519173118466601], [Timestamp('2019-09-12 00:00:00'), 2.0, 1.0, 0.26114673537843386, 0.6571555824889208, 0.08169768213264542, 0.5195276757647095], [Timestamp('2019-09-13 00:00:00'), 2.0, 0.0, 0.57632644166682, 0.3439925540820117, 0.07968100425116752, 0.5192060337755112], [Timestamp('2019-09-16 00:00:00'), 0.0, 0.0, 0.486603519020993, 0.41150073961656686, 0.10189574136244173, 0.51737434889659], [Timestamp('2019-09-17 00:00:00'), 1.0, 1.0, 0.4331848744725356, 0.48191968519895345, 0.0848954403285113, 0.5139965042643432], [Timestamp('2019-09-18 00:00:00'), 0.0, 0.0, 0.8289694170046219, 0.06782101979510403, 0.10320956320027115, 0.5168642460753335], [Timestamp('2019-09-19 00:00:00'), 0.0, 0.0, 0.7855893023934544, 0.14456443915105907, 0.0698462584554838, 0.5134648715105433], [Timestamp('2019-09-20 00:00:00'), 0.0, 0.0, 0.7062974162110462, 0.18944528781095765, 0.10425729597799689, 0.514956824480634], [Timestamp('2019-09-23 00:00:00'), 0.0, 0.0, 0.7298564512552451, 0.15977760918659498, 0.11036593955816318, 0.518797737142492], 
[Timestamp('2019-09-24 00:00:00'), 0.0, 0.0, 0.6610017304375424, 0.2800626142009169, 0.05893565536153986, 0.5118906148317913], [Timestamp('2019-09-25 00:00:00'), 0.0, 1.0, 0.35004196843490337, 0.6021132054671382, 0.04784482609795885, 0.5142339142339142], [Timestamp('2019-09-26 00:00:00'), 0.0, 1.0, 0.317042573655528, 0.6297212148089453, 0.05323621153552714, 0.5207578039694828], [Timestamp('2019-09-27 00:00:00'), 1.0, 0.0, 0.6232194063492514, 0.3136307008831215, 0.06314989276762424, 0.518075998075998], [Timestamp('2019-09-30 00:00:00'), 1.0, 1.0, 0.3181583661344131, 0.6588944656123222, 0.022947168253266436, 0.5136463736463736], [Timestamp('2019-10-01 00:00:00'), 1.0, 0.0, 0.7080288336475413, 0.23757188295053122, 0.05439928340192859, 0.5220093552979459], [Timestamp('2019-10-02 00:00:00'), 2.0, 0.0, 0.7721864454778893, 0.14078205172386662, 0.08703150279824196, 0.5255892255892256], [Timestamp('2019-10-03 00:00:00'), 2.0, 0.0, 0.7290412503801802, 0.20922819182241517, 0.06173055779740472, 0.5259408602150538], [Timestamp('2019-10-04 00:00:00'), 2.0, 0.0, 0.7511302245561078, 0.16373211445621344, 0.08513766098768184, 0.5240688575899843], [Timestamp('2019-10-07 00:00:00'), 2.0, 0.0, 0.7396340361060626, 0.21585506731141899, 0.04451089658251694, 0.5225362054277717], [Timestamp('2019-10-08 00:00:00'), 2.0, 0.0, 0.8004276807716346, 0.15559247913227658, 0.04397984009609079, 0.5228849618428776], [Timestamp('2019-10-09 00:00:00'), 2.0, 0.0, 0.8232444496952726, 0.1557161658684012, 0.021039384436328343, 0.5226868686868686], [Timestamp('2019-10-10 00:00:00'), 2.0, 0.0, 0.7619672783406508, 0.21536648819731355, 0.022666233462034002, 0.5199419343131918], [Timestamp('2019-10-11 00:00:00'), 2.0, 0.0, 0.6660613565117389, 0.31090276829599517, 0.023035875192264237, 0.5202865306450964], [Timestamp('2019-10-14 00:00:00'), 2.0, 0.0, 0.6681478963230351, 0.3167143749267359, 0.015137728750229586, 0.521292447336185], [Timestamp('2019-10-15 00:00:00'), 2.0, 0.0, 0.5713418638267992, 0.4134023242321665, 0.015255811941035159, 0.5197594997594998], [Timestamp('2019-10-16 00:00:00'), 2.0, 0.0, 0.8069104841590198, 0.17509145714809687, 0.017998058692883553, 0.517015701570157], [Timestamp('2019-10-17 00:00:00'), 2.0, 0.0, 0.7544150078609054, 0.23108972851714776, 0.014495263621950109, 0.5173561703996487], [Timestamp('2019-10-18 00:00:00'), 2.0, 0.0, 0.70180873834481, 0.2836442181416967, 0.014547043513490092, 0.5176952961568346], [Timestamp('2019-10-21 00:00:00'), 2.0, 0.0, 0.6257179852687692, 0.3593843495459705, 0.014897665185257767, 0.5181333015191283], [Timestamp('2019-10-22 00:00:00'), 2.0, 0.0, 0.660383575266259, 0.3267440099379204, 0.012872414795817438, 0.5197769442956083], [Timestamp('2019-10-23 00:00:00'), 2.0, 0.0, 0.6130160539072039, 0.3726821202341592, 0.01430182585863604, 0.5201069518716578], [Timestamp('2019-10-24 00:00:00'), 2.0, 0.0, 0.8636757959531968, 0.11617251318542289, 0.020151690861379783, 0.5180114254086856], [Timestamp('2019-10-25 00:00:00'), 2.0, 0.0, 0.8455596605448222, 0.13260415636971726, 0.021836183085461622, 0.5163857323232323], [Timestamp('2019-10-28 00:00:00'), 2.0, 0.0, 0.9013237716594508, 0.0806279491438865, 0.018048279196661934, 0.516803118908382], [Timestamp('2019-10-29 00:00:00'), 1.0, 0.0, 0.7665211522319979, 0.21181112964676949, 0.021667718121233977, 0.5172141649962662], [Timestamp('2019-10-30 00:00:00'), 1.0, 1.0, 0.43766517460866144, 0.5497932126728969, 0.012541612718439673, 0.51192855709412], [Timestamp('2019-10-31 00:00:00'), 0.0, 0.0, 0.5004883510814111, 0.48955824094534833, 
0.009953407973241889, 0.5165390749601276], [Timestamp('2019-11-01 00:00:00'), 0.0, 0.0, 0.7555885357873819, 0.2303969656476552, 0.014014498564961406, 0.5158276125095348], [Timestamp('2019-11-04 00:00:00'), 0.0, 0.0, 0.7792796909653996, 0.2000621175336934, 0.020658191500910573, 0.5159835518152445], [Timestamp('2019-11-05 00:00:00'), 1.0, 0.0, 0.8345164408648981, 0.14567271674866422, 0.01981084238643829, 0.5161272579315208], [Timestamp('2019-11-06 00:00:00'), 2.0, 0.0, 0.825703161635289, 0.15620198110348607, 0.018094857261226123, 0.5132070737702964], [Timestamp('2019-11-07 00:00:00'), 0.0, 0.0, 0.7583483560710328, 0.2154095517458443, 0.02624209218312081, 0.5111338760706645], [Timestamp('2019-11-08 00:00:00'), 0.0, 0.0, 0.9133607795888113, 0.0601862567014119, 0.02645296370977828, 0.5115240514551507], [Timestamp('2019-11-11 00:00:00'), 0.0, 0.0, 0.8784709189006921, 0.10222268793873042, 0.01930639316058044, 0.5122085187904719], [Timestamp('2019-11-12 00:00:00'), 1.0, 0.0, 0.8474816687065654, 0.1355274378680081, 0.016990893425427856, 0.5110391192605525], [Timestamp('2019-11-13 00:00:00'), 0.0, 0.0, 0.7017153552358132, 0.28006725037577423, 0.018217394388415464, 0.5126009279621552], [Timestamp('2019-11-14 00:00:00'), 1.0, 0.0, 0.8408164391450996, 0.14198172297583042, 0.017201837879066442, 0.508539283709703], [Timestamp('2019-11-18 00:00:00'), 1.0, 0.0, 0.9225361994154009, 0.0611134839732351, 0.01635031661136243, 0.5121182640216965], [Timestamp('2019-11-19 00:00:00'), 2.0, 0.0, 0.7092970582154347, 0.27676771066377454, 0.01393523112079378, 0.5124430390697691], [Timestamp('2019-11-21 00:00:00'), 1.0, 0.0, 0.8703689245064925, 0.10819836074518495, 0.021432714748319696, 0.5121269406276828], [Timestamp('2019-11-22 00:00:00'), 1.0, 0.0, 0.7539114128828092, 0.23108910516003486, 0.014999481957153539, 0.5094561772303762], [Timestamp('2019-11-25 00:00:00'), 2.0, 0.0, 0.7341392231009437, 0.2476187633653713, 0.018242013533685552, 0.508515544587736], [Timestamp('2019-11-26 00:00:00'), 2.0, 0.0, 0.8268149553035395, 0.13711828100311932, 0.03606676369334368, 0.5076621316858322], [Timestamp('2019-11-27 00:00:00'), 2.0, 0.0, 0.7974162605716083, 0.17388824340090378, 0.028695496027487666, 0.508632983720514], [Timestamp('2019-11-28 00:00:00'), 2.0, 0.0, 0.7182225398563047, 0.2524758816386314, 0.02930157850506463, 0.5121691375456852], [Timestamp('2019-11-29 00:00:00'), 2.0, 0.0, 0.8803546907356146, 0.09482310086612643, 0.0248222083982594, 0.5113045294771662], [Timestamp('2019-12-02 00:00:00'), 1.0, 0.0, 0.8373616746248238, 0.12462366827109314, 0.038014657104086, 0.5119086122623003], [Timestamp('2019-12-03 00:00:00'), 2.0, 0.0, 0.8409096601864592, 0.1317652909857309, 0.027325048827811557, 0.5112411505904088], [Timestamp('2019-12-04 00:00:00'), 2.0, 0.0, 0.8575744041818828, 0.1161216535981782, 0.026303942219939105, 0.507736664251843], [Timestamp('2019-12-05 00:00:00'), 2.0, 0.0, 0.6746443659383613, 0.2919006707551249, 0.03345496330651458, 0.5067769783841332], [Timestamp('2019-12-06 00:00:00'), 1.0, 0.0, 0.6465989225273827, 0.32640930720586747, 0.02699177026675097, 0.5077293523861474], [Timestamp('2019-12-09 00:00:00'), 1.0, 1.0, 0.40248077215400585, 0.5794955741581688, 0.01802365368782805, 0.5019806949632758], [Timestamp('2019-12-10 00:00:00'), 2.0, 1.0, 0.24764426665972664, 0.7337574237005363, 0.018598309639737236, 0.5075253659897508], [Timestamp('2019-12-11 00:00:00'), 2.0, 1.0, 0.20692708089333356, 0.7729026216349751, 0.02017029747169078, 0.5060258725753638], [Timestamp('2019-12-12 00:00:00'), 1.0, 0.0, 
0.6373968321107879, 0.31722882173653116, 0.04537434615268284, 0.5051612997958973], [Timestamp('2019-12-13 00:00:00'), 2.0, 0.0, 0.6914352057302063, 0.2377037659427114, 0.07086102832708135, 0.5060185310030252], [Timestamp('2019-12-16 00:00:00'), 2.0, 0.0, 0.7928350978349584, 0.09260570462811553, 0.11455919753692473, 0.5057010729760268], [Timestamp('2019-12-17 00:00:00'), 2.0, 0.0, 0.5082814521016064, 0.4092305340904412, 0.08248801380795567, 0.5033272042728307], [Timestamp('2019-12-18 00:00:00'), 2.0, 0.0, 0.7479258867163839, 0.20814846865249836, 0.04392564463111456, 0.5042723838569517], [Timestamp('2019-12-19 00:00:00'), 1.0, 0.0, 0.5499546128720935, 0.34295592595643204, 0.10708946117147218, 0.5052139967256609], [Timestamp('2019-12-20 00:00:00'), 2.0, 0.0, 0.6716737081578623, 0.2636746582081561, 0.06465163363398167, 0.5091477512637862], [Timestamp('2019-12-23 00:00:00'), 1.0, 1.0, 0.403245919587019, 0.5700551132945254, 0.02669896711845271, 0.5094592556047117], [Timestamp('2019-12-26 00:00:00'), 1.0, 1.0, 0.12185947125311135, 0.853704473699068, 0.024436055047820374, 0.5192645170832042], [Timestamp('2019-12-27 00:00:00'), 0.0, 1.0, 0.45922270640399565, 0.49624720251666266, 0.04453009107934277, 0.5143997979555138], [Timestamp('2019-12-30 00:00:00'), 0.0, 1.0, 0.2892936564763809, 0.6826258414838515, 0.028080502039765066, 0.5141046124336536], [Timestamp('2020-01-02 00:00:00'), 0.0, 1.0, 0.37084944280145754, 0.5264153574421248, 0.10273519975642034, 0.5094296590477062], [Timestamp('2020-01-03 00:00:00'), 0.0, 0.0, 0.5549704073022077, 0.3535879716080264, 0.09144162108976404, 0.5105109975761181], [Timestamp('2020-01-06 00:00:00'), 0.0, 1.0, 0.32076570688453065, 0.6081724093895161, 0.07106188372595615, 0.5081677268117946], [Timestamp('2020-01-07 00:00:00'), 0.0, 0.0, 0.6480074829945035, 0.28637131382371317, 0.0656212031817866, 0.5081355097064068], [Timestamp('2020-01-08 00:00:00'), 0.0, 0.0, 0.7019170273937309, 0.24093976972245212, 0.05714320288381383, 0.5028180819494379], [Timestamp('2020-01-09 00:00:00'), 0.0, 0.0, 0.8880500093910424, 0.05815196142170028, 0.05379802918725663, 0.5061591616515081], [Timestamp('2020-01-10 00:00:00'), 0.0, 0.0, 0.6302950708836742, 0.3268670958004879, 0.04283783331583604, 0.5048751689897218], [Timestamp('2020-01-13 00:00:00'), 0.0, 0.0, 0.6058978769369545, 0.3593397183153678, 0.03476240474768088, 0.5094330808816219], [Timestamp('2020-01-14 00:00:00'), 0.0, 0.0, 0.7673137157169321, 0.19664762977154693, 0.03603865451152279, 0.5099116699070264], [Timestamp('2020-01-15 00:00:00'), 0.0, 0.0, 0.8372648309239331, 0.13727946725472082, 0.025455701821345343, 0.5095044465207015], [Timestamp('2020-01-16 00:00:00'), 0.0, 0.0, 0.7864960776994515, 0.1985926230399987, 0.014911299260549378, 0.5106004120531966], [Timestamp('2020-01-17 00:00:00'), 0.0, 0.0, 0.8517404840843004, 0.13590935597567072, 0.01235015994002764, 0.5070250527877646], [Timestamp('2020-01-20 00:00:00'), 0.0, 0.0, 0.7506008375247518, 0.2402878385771456, 0.009111323898101487, 0.5106332835146395], [Timestamp('2020-01-21 00:00:00'), 0.0, 0.0, 0.7799122093990793, 0.20908246876188594, 0.011005321839033739, 0.5068234130381023], [Timestamp('2020-01-22 00:00:00'), 0.0, 0.0, 0.8634033474065931, 0.12642029870809043, 0.01017635388531795, 0.5128757334080961], [Timestamp('2020-01-23 00:00:00'), 1.0, 0.0, 0.8717716014712756, 0.11649792943706644, 0.011730469091660804, 0.507808084816191], [Timestamp('2020-01-24 00:00:00'), 1.0, 0.0, 0.9492840875022881, 0.044157610926191485, 0.006558301571518446, 0.5067345701229801], 
[Timestamp('2020-01-27 00:00:00'), 2.0, 0.0, 0.9586061197577923, 0.03212175711161017, 0.00927212313060057, 0.5067620366469978], [Timestamp('2020-01-28 00:00:00'), 2.0, 0.0, 0.8816590010059026, 0.10809923687612953, 0.010241762117967056, 0.5048474354130776], [Timestamp('2020-01-29 00:00:00'), 2.0, 0.0, 0.9194431804835571, 0.07253244685136956, 0.008024372665072412, 0.5034210883233939], [Timestamp('2020-01-30 00:00:00'), 2.0, 0.0, 0.6687218511208161, 0.32254055617739347, 0.008737592701788997, 0.5026201277002068], [Timestamp('2020-01-31 00:00:00'), 2.0, 0.0, 0.7295799631974427, 0.2603951306473635, 0.010024906155196885, 0.5013262195017335], [Timestamp('2020-02-03 00:00:00'), 2.0, 0.0, 0.8081882559375114, 0.18246032145606048, 0.00935142260642716, 0.5005217563674628], [Timestamp('2020-02-04 00:00:00'), 2.0, 1.0, 0.39435803695469046, 0.5982167714378313, 0.007425191607476346, 0.49971613731487713], [Timestamp('2020-02-05 00:00:00'), 2.0, 0.0, 0.8279306093557826, 0.15818321727849094, 0.013886173365724798, 0.4994046173049085], [Timestamp('2020-02-06 00:00:00'), 2.0, 0.0, 0.5409502105843357, 0.45190923369503333, 0.007140555720633058, 0.5003311115930598], [Timestamp('2020-02-07 00:00:00'), 1.0, 0.0, 0.7138046107284138, 0.26788927545936086, 0.018306113812223312, 0.4989047793178661], [Timestamp('2020-02-10 00:00:00'), 0.0, 0.0, 0.7738570128852574, 0.2047115392162611, 0.021431447898482277, 0.4971865359546519], [Timestamp('2020-02-11 00:00:00'), 0.0, 0.0, 0.6308174737207478, 0.3464780074315823, 0.022704518847666774, 0.49902998236331575], [Timestamp('2020-02-12 00:00:00'), 0.0, 1.0, 0.3687148799259592, 0.6142296064433832, 0.01705551363065424, 0.4969084533038021], [Timestamp('2020-02-13 00:00:00'), 0.0, 0.0, 0.7658115328477249, 0.2208759680926263, 0.013312499059649936, 0.4936799934592429], [Timestamp('2020-02-14 00:00:00'), 0.0, 0.0, 0.8121303341809157, 0.17405933264060763, 0.013810333178477891, 0.4968402395795135], [Timestamp('2020-02-17 00:00:00'), 0.0, 0.0, 0.796374994230778, 0.1914239679949002, 0.012201037774318682, 0.49778265107212477], [Timestamp('2020-02-18 00:00:00'), 0.0, 0.0, 0.5165740276000642, 0.4748423568263464, 0.008583615573588492, 0.5041193222887211], [Timestamp('2020-02-19 00:00:00'), 0.0, 1.0, 0.3918650351320897, 0.6005183181035263, 0.007616646764383669, 0.5018777881522979], [Timestamp('2020-02-20 00:00:00'), 0.0, 0.0, 0.8460491938504426, 0.1442746203480199, 0.009676185801535696, 0.49555004050118057], [Timestamp('2020-02-21 00:00:00'), 0.0, 0.0, 0.7372452011490906, 0.253859519525772, 0.008895279325137666, 0.49598364598364597], [Timestamp('2020-02-27 00:00:00'), 0.0, 0.0, 0.8574787255425614, 0.12565266627125066, 0.01686860818618918, 0.4965878152771357], [Timestamp('2020-02-28 00:00:00'), 0.0, 0.0, 0.8677575079262608, 0.11924440563058002, 0.012998086443157687, 0.5013099505034989], [Timestamp('2020-03-02 00:00:00'), 0.0, 0.0, 0.868561117636161, 0.12290486862989301, 0.00853401373394591, 0.4980412505600073], [Timestamp('2020-03-03 00:00:00'), 0.0, 0.0, 0.9432762115436718, 0.04957894007093194, 0.007144848385397846, 0.49936236602903267], [Timestamp('2020-03-04 00:00:00'), 0.0, 0.0, 0.8634731769605355, 0.12635465213070285, 0.010172170908759005, 0.49840171070203015], [Timestamp('2020-03-05 00:00:00'), 0.0, 0.0, 0.8762958791923753, 0.09538532478490389, 0.028318796022722997, 0.5042794234938608], [Timestamp('2020-03-06 00:00:00'), 0.0, 0.0, 0.9123959273946226, 0.06318969871395122, 0.024414373891426294, 0.4996472663139329], [Timestamp('2020-03-09 00:00:00'), 0.0, 0.0, 0.920640213736717, 
0.04522173161570135, 0.0341380546475792, 0.5019724064027861], [Timestamp('2020-03-10 00:00:00'), 0.0, 0.0, 0.8303377729381909, 0.13691257687613342, 0.03274965018567929, 0.49963474816261355], [Timestamp('2020-03-11 00:00:00'), 1.0, 0.0, 0.8605331974236096, 0.10450497073774519, 0.03496183183864223, 0.5050381018934478], [Timestamp('2020-03-12 00:00:00'), 2.0, 0.0, 0.8265322986187494, 0.11774529429086246, 0.05572240709038905, 0.5004425120293529], [Timestamp('2020-03-13 00:00:00'), 0.0, 0.0, 0.835246768804883, 0.10511878710920651, 0.059634444085911835, 0.5001424275949492], [Timestamp('2020-03-16 00:00:00'), 2.0, 0.0, 0.7617306894282532, 0.1387559507795462, 0.09951335979220077, 0.5017493117778963], [Timestamp('2020-03-17 00:00:00'), 2.0, 0.0, 0.753142419550878, 0.1325356439776393, 0.1143219364714808, 0.49843074240873914], [Timestamp('2020-03-18 00:00:00'), 2.0, 0.0, 0.7084483196929472, 0.16009854298602597, 0.13145313732102845, 0.4978823577531024], [Timestamp('2020-03-19 00:00:00'), 2.0, 0.0, 0.6240564765743543, 0.17249392824348217, 0.20344959518216005, 0.49880542495420016], [Timestamp('2020-03-20 00:00:00'), 2.0, 0.0, 0.5591026744009617, 0.20521023640135153, 0.23568708919768708, 0.49716114400123845], [Timestamp('2020-03-23 00:00:00'), 2.0, 0.0, 0.701238599846002, 0.0346555213804477, 0.2641058787735505, 0.49445411647003157], [Timestamp('2020-03-24 00:00:00'), 2.0, 0.0, 0.5991586575826179, 0.1970139564923512, 0.20382738592503105, 0.4937183410069032], [Timestamp('2020-03-25 00:00:00'), 2.0, 0.0, 0.43370225589072076, 0.24914787584822093, 0.31714986826106417, 0.49463464182045375], [Timestamp('2020-03-26 00:00:00'), 2.0, 1.0, 0.3350556683821904, 0.4510341077887434, 0.21391022382906777, 0.4926811788576366], [Timestamp('2020-03-27 00:00:00'), 2.0, 0.0, 0.5866586308342351, 0.14686377962792532, 0.26647758953784617, 0.4931552620677289], [Timestamp('2020-03-30 00:00:00'), 2.0, 0.0, 0.3876149298140047, 0.3362302342351752, 0.2761548359508197, 0.49076401749862586], [Timestamp('2020-03-31 00:00:00'), 2.0, 1.0, 0.13744770507391768, 0.7397903792453586, 0.1227619156807263, 0.4919950482890167], [Timestamp('2020-04-01 00:00:00'), 2.0, 1.0, 0.32527340858059556, 0.4130262033518033, 0.2617003880676045, 0.4903251400582092], [Timestamp('2020-04-02 00:00:00'), 2.0, 1.0, 0.29144010090123434, 0.5064672157276874, 0.20209268337107705, 0.48930567943007014], [Timestamp('2020-04-03 00:00:00'), 2.0, 0.0, 0.4726678213912256, 0.11148360825874917, 0.4158485703500313, 0.48916381648477136], [Timestamp('2020-04-06 00:00:00'), 2.0, 0.0, 0.39839217193015336, 0.26927345289586124, 0.33233437517399156, 0.48886459640137864], [Timestamp('2020-04-07 00:00:00'), 2.0, 2.0, 0.2896836297100924, 0.3189670911360031, 0.3913492791539092, 0.4877909522200414], [Timestamp('2020-04-08 00:00:00'), 0.0, 1.0, 0.14428236321872553, 0.727149815589513, 0.12856782119176818, 0.4861167151746428], [Timestamp('2020-04-09 00:00:00'), 0.0, 0.0, 0.34947133829807686, 0.3394879157819176, 0.3110407459200012, 0.48248223349805236], [Timestamp('2020-04-13 00:00:00'), 2.0, 1.0, 0.25696994885443886, 0.5487437520516087, 0.19428629909395054, 0.48487972392798345], [Timestamp('2020-04-14 00:00:00'), 2.0, 0.0, 0.3741592715649992, 0.3398490311365708, 0.28599169729843454, 0.48413838514652713], [Timestamp('2020-04-15 00:00:00'), 2.0, 2.0, 0.36011552950965164, 0.16971012369216812, 0.470174346798178, 0.4844344069541769], [Timestamp('2020-04-16 00:00:00'), 2.0, 0.0, 0.4622274666250138, 0.20864126170183955, 0.3291312716731421, 0.48591772755514456], [Timestamp('2020-04-17 00:00:00'), 2.0, 
1.0, 0.19684909318433133, 0.610673009379697, 0.19247789743597624, 0.4836440234167361], [Timestamp('2020-04-20 00:00:00'), 2.0, 1.0, 0.1327158265560611, 0.6466002193240618, 0.22068395411987884, 0.4803329680789831], [Timestamp('2020-04-22 00:00:00'), 2.0, 0.0, 0.4205007341828586, 0.21206393531599688, 0.3674353305011489, 0.4794427690859157], [Timestamp('2020-04-23 00:00:00'), 2.0, 1.0, 0.19930478357126044, 0.6545928920994702, 0.1461023243292695, 0.4777632756549051], [Timestamp('2020-04-24 00:00:00'), 2.0, 2.0, 0.2612366719199756, 0.27329778176636893, 0.4654655463136539, 0.4786440620042803], [Timestamp('2020-04-27 00:00:00'), 2.0, 2.0, 0.20775291538489993, 0.36083669333748813, 0.43141039127761155, 0.4789338520467205], [Timestamp('2020-04-28 00:00:00'), 2.0, 2.0, 0.22495585822141007, 0.26723778287912703, 0.5078063588994641, 0.4798094761617812], [Timestamp('2020-04-29 00:00:00'), 0.0, 2.0, 0.1006114535991208, 0.42865005501717396, 0.4707384913837108, 0.4755656031735372], [Timestamp('2020-04-30 00:00:00'), 0.0, 2.0, 0.23822991047368258, 0.24182695830981177, 0.5199431312165013, 0.474120136641958], [Timestamp('2020-05-04 00:00:00'), 2.0, 2.0, 0.22737394639072459, 0.08377425120517132, 0.688851802404111, 0.47453998421485455], [Timestamp('2020-05-05 00:00:00'), 2.0, 1.0, 0.1158137263803271, 0.5799929587591709, 0.304193314860506, 0.4779687064278633], [Timestamp('2020-05-06 00:00:00'), 2.0, 1.0, 0.09961502546056092, 0.6226229099863042, 0.27776206455312813, 0.47744755446489123], [Timestamp('2020-05-07 00:00:00'), 2.0, 1.0, 0.15714305232484102, 0.5869024488200786, 0.2559544988550869, 0.4801498414263358], [Timestamp('2020-05-08 00:00:00'), 1.0, 1.0, 0.19854089769605204, 0.6061397494440066, 0.19531935285994334, 0.4774261597102738], [Timestamp('2020-05-11 00:00:00'), 2.0, 1.0, 0.23114377845229211, 0.46444774595035815, 0.3044084755973445, 0.480270947361217], [Timestamp('2020-05-12 00:00:00'), 2.0, 1.0, 0.23260708723344084, 0.4484870010934179, 0.31890591167314136, 0.47951766072282576], [Timestamp('2020-05-13 00:00:00'), 2.0, 2.0, 0.24726498389280505, 0.20780037809578783, 0.5449346380114024, 0.4766751918158569], [Timestamp('2020-05-14 00:00:00'), 2.0, 0.0, 0.3765872192228756, 0.3136351724776929, 0.309777608299429, 0.4775326797385621], [Timestamp('2020-05-15 00:00:00'), 2.0, 1.0, 0.28120207277133497, 0.49668145234576255, 0.22211647488290934, 0.4812767723161508], [Timestamp('2020-05-18 00:00:00'), 2.0, 1.0, 0.14455743515606698, 0.6899097467323, 0.16553281811163065, 0.47959084562617615], [Timestamp('2020-05-19 00:00:00'), 2.0, 1.0, 0.2165767261305105, 0.578152271772464, 0.20527100209702595, 0.47479177472279704], [Timestamp('2020-05-20 00:00:00'), 2.0, 1.0, 0.24409805992318012, 0.4583300168792878, 0.2975719231975316, 0.4754047898651293], [Timestamp('2020-05-21 00:00:00'), 2.0, 1.0, 0.1754276460491904, 0.5593722328155081, 0.26520012113530794, 0.4762485590304606], [Timestamp('2020-05-22 00:00:00'), 2.0, 2.0, 0.34937038879506865, 0.18474214238647363, 0.4658874688184573, 0.472847978040918], [Timestamp('2020-05-25 00:00:00'), 2.0, 1.0, 0.2390527594297706, 0.40417687938879954, 0.356770361181434, 0.4717271435070372], [Timestamp('2020-05-26 00:00:00'), 2.0, 1.0, 0.21474497081696556, 0.42996933423463296, 0.3552856949483965, 0.47085188797941674], [Timestamp('2020-05-27 00:00:00'), 2.0, 1.0, 0.27797277781797375, 0.40896223173433854, 0.31306499044768815, 0.47111922591798755], [Timestamp('2020-05-28 00:00:00'), 2.0, 1.0, 0.12275652715954906, 0.7186594126855171, 0.1585840601549364, 0.4694248671266603], [Timestamp('2020-05-29 
00:00:00'), 2.0, 1.0, 0.1698174019679914, 0.5958796489351617, 0.23430294909684107, 0.46969038489600096], [Timestamp('2020-06-01 00:00:00'), 2.0, 1.0, 0.1754268707590641, 0.6709989190775671, 0.1535742101633691, 0.471655679815786], [Timestamp('2020-06-02 00:00:00'), 1.0, 1.0, 0.06232359033740822, 0.7851268909123683, 0.15254951875022613, 0.46948966343753123], [Timestamp('2020-06-03 00:00:00'), 1.0, 1.0, 0.14690581446137263, 0.6384551903379735, 0.21463899520065444, 0.47388395086387697], [Timestamp('2020-06-04 00:00:00'), 1.0, 2.0, 0.3067767305181919, 0.2833782723822605, 0.40984499709954153, 0.4773881163789399], [Timestamp('2020-06-05 00:00:00'), 0.0, 1.0, 0.13547167107678126, 0.6177745562567217, 0.24675377266650214, 0.479571162498386], [Timestamp('2020-06-08 00:00:00'), 0.0, 1.0, 0.11887350039313979, 0.5684331300694083, 0.3126933695374518, 0.4775211864672027], [Timestamp('2020-06-09 00:00:00'), 0.0, 2.0, 0.3080011559106863, 0.2751154832334128, 0.4168833608559043, 0.4755747067866169], [Timestamp('2020-06-10 00:00:00'), 1.0, 0.0, 0.40408132469257335, 0.2173896484309714, 0.37852902687645856, 0.4749926712352561], [Timestamp('2020-06-12 00:00:00'), 1.0, 2.0, 0.3102070440737964, 0.16539570569383039, 0.5243972502323748, 0.47571126179456824], [Timestamp('2020-06-15 00:00:00'), 2.0, 0.0, 0.5414593231614867, 0.08243859391169225, 0.3761020829268279, 0.47723069796001977], [Timestamp('2020-06-16 00:00:00'), 1.0, 1.0, 0.29830450150608545, 0.3772232764682933, 0.32447222202561876, 0.4779539208841486], [Timestamp('2020-06-17 00:00:00'), 1.0, 0.0, 0.45741782327412406, 0.346879572603641, 0.1957026041222312, 0.4814205667993232], [Timestamp('2020-06-18 00:00:00'), 2.0, 0.0, 0.39537454173192293, 0.3315055861783045, 0.2731198720897709, 0.4810636257553176], [Timestamp('2020-06-19 00:00:00'), 2.0, 0.0, 0.4284907009578414, 0.16675937622371456, 0.4047499228184411, 0.4789895760005269], [Timestamp('2020-06-22 00:00:00'), 2.0, 2.0, 0.35148668396582916, 0.2363362012934551, 0.4121771147407088, 0.4808409454478915], [Timestamp('2020-06-23 00:00:00'), 2.0, 1.0, 0.24626892101080447, 0.5816991778650293, 0.17203190112416195, 0.48110491093108126], [Timestamp('2020-06-24 00:00:00'), 2.0, 0.0, 0.42811305412605843, 0.14584564351902546, 0.42604130235491017, 0.4808068204137665], [Timestamp('2020-06-25 00:00:00'), 2.0, 2.0, 0.23125302719821667, 0.22248283813695996, 0.5462641346648248, 0.48060768517312996], [Timestamp('2020-06-26 00:00:00'), 2.0, 2.0, 0.2758985908807073, 0.1130601357417623, 0.6110412733775362, 0.479287218747996], [Timestamp('2020-06-29 00:00:00'), 2.0, 2.0, 0.17688936711376563, 0.09095544673894591, 0.7321551861472871, 0.48168778220356684], [Timestamp('2020-06-30 00:00:00'), 2.0, 2.0, 0.20360905684291855, 0.18188724883506832, 0.6145036943220186, 0.48148133335821797], [Timestamp('2020-07-01 00:00:00'), 2.0, 2.0, 0.10103501655172435, 0.07577657900756635, 0.8231884044407076, 0.4822041835387532], [Timestamp('2020-07-02 00:00:00'), 2.0, 2.0, 0.12215772646606979, 0.12301189800192487, 0.7548303755319988, 0.4843443323743833], [Timestamp('2020-07-03 00:00:00'), 2.0, 2.0, 0.14135724686981183, 0.1149588999026489, 0.743683853227537, 0.4841325233807383], [Timestamp('2020-07-06 00:00:00'), 1.0, 2.0, 0.10376174593477747, 0.16276087210687581, 0.7334773819583489, 0.48485559114145604], [Timestamp('2020-07-07 00:00:00'), 2.0, 2.0, 0.18519631505080505, 0.19462440715943916, 0.6201792777897606, 0.4839031769976094], [Timestamp('2020-07-08 00:00:00'), 2.0, 2.0, 0.10285640488662426, 0.2837602853372223, 0.6133833097761605, 0.4839011974280692], 
[Timestamp('2020-07-09 00:00:00'), 2.0, 2.0, 0.19177722543168396, 0.03491025413269213, 0.7733125204356214, 0.48543171135621366], [Timestamp('2020-07-10 00:00:00'), 1.0, 2.0, 0.0802043255894858, 0.09702209056376082, 0.8227735838467464, 0.48755613562446626], [Timestamp('2020-07-13 00:00:00'), 2.0, 2.0, 0.1418912317654126, 0.06653908957069465, 0.7915696786638994, 0.485596271866279], [Timestamp('2020-07-14 00:00:00'), 1.0, 2.0, 0.13031840836888933, 0.10342160243397709, 0.7662599891971303, 0.487708494261703], [Timestamp('2020-07-15 00:00:00'), 1.0, 2.0, 0.033582699964069145, 0.338119754453966, 0.6282975455819629, 0.48671015363295017], [Timestamp('2020-07-16 00:00:00'), 1.0, 2.0, 0.11295398700969293, 0.046963884233215125, 0.8400821287570952, 0.48349100408464735], [Timestamp('2020-07-17 00:00:00'), 0.0, 2.0, 0.14696520669086993, 0.05207818420078192, 0.8009566091083494, 0.4812557437230947], [Timestamp('2020-07-20 00:00:00'), 0.0, 2.0, 0.18056861444886363, 0.10633673156333862, 0.7130946539878039, 0.4812179423406601], [Timestamp('2020-07-21 00:00:00'), 0.0, 2.0, 0.0679249472730759, 0.2688840957355935, 0.6631909569913265, 0.48144891211179547], [Timestamp('2020-07-22 00:00:00'), 1.0, 2.0, 0.14493925561032583, 0.15256248514692997, 0.7024982592427417, 0.4771947734871687], [Timestamp('2020-07-23 00:00:00'), 2.0, 2.0, 0.13265659612028313, 0.06232947383872363, 0.8050139300409952, 0.4807003845533788], [Timestamp('2020-07-24 00:00:00'), 1.0, 2.0, 0.11745111190654763, 0.07230695654311381, 0.8102419315503434, 0.48150326550045214], [Timestamp('2020-07-27 00:00:00'), 1.0, 2.0, 0.0888681097873235, 0.18177416490182233, 0.7293577253108513, 0.4814386501636088], [Timestamp('2020-07-28 00:00:00'), 1.0, 2.0, 0.1167557836978491, 0.04146957213473768, 0.8417746441674197, 0.47777706651133695], [Timestamp('2020-07-29 00:00:00'), 2.0, 2.0, 0.11985116494900967, 0.02655234185476616, 0.8535964931962269, 0.4807800695143399], [Timestamp('2020-07-30 00:00:00'), 1.0, 2.0, 0.07134805540399537, 0.03853085039620381, 0.890121094199802, 0.4821267486091114], [Timestamp('2020-07-31 00:00:00'), 2.0, 2.0, 0.12265394617287616, 0.013766751414327733, 0.8635793024127909, 0.480901005928178], [Timestamp('2020-08-03 00:00:00'), 2.0, 2.0, 0.12412740330020756, 0.019792823045107093, 0.856079773654692, 0.4783555699787196], [Timestamp('2020-08-04 00:00:00'), 2.0, 2.0, 0.2770319818524178, 0.004594346394514176, 0.718373671753069, 0.4793743758720472], [Timestamp('2020-08-05 00:00:00'), 1.0, 2.0, 0.13369087545299432, 0.05547075861555583, 0.8108383659314502, 0.47907745154303355], [Timestamp('2020-08-06 00:00:00'), 0.0, 2.0, 0.12058884287345237, 0.10497310664655632, 0.7744380504799913, 0.47718416335437613], [Timestamp('2020-08-07 00:00:00'), 1.0, 2.0, 0.15994115782381316, 0.024981843455207403, 0.8150769987209737, 0.48179586371075733], [Timestamp('2020-08-10 00:00:00'), 0.0, 2.0, 0.16362081599002926, 0.06595964813079566, 0.7704195358791741, 0.4737341460745716], [Timestamp('2020-08-11 00:00:00'), 1.0, 2.0, 0.1821943840111305, 0.016962106578970185, 0.8008435094099049, 0.47437854780744376], [Timestamp('2020-08-12 00:00:00'), 0.0, 2.0, 0.1089359789590784, 0.15716799201406406, 0.7338960290268628, 0.471422500357182], [Timestamp('2020-08-13 00:00:00'), 0.0, 2.0, 0.2528760316373685, 0.0174131983566066, 0.72971077000603, 0.46938976510746233], [Timestamp('2020-08-14 00:00:00'), 1.0, 2.0, 0.19084569936828547, 0.011700985727050695, 0.7974533149046695, 0.4717418598717591], [Timestamp('2020-08-17 00:00:00'), 0.0, 2.0, 0.22550121593568026, 0.008951699600074796, 
0.7655470844642389, 0.46991929848117886], [Timestamp('2020-08-18 00:00:00'), 1.0, 2.0, 0.21829348197360107, 0.005413810776266768, 0.7762927072501332, 0.4721861749461891], [Timestamp('2020-08-19 00:00:00'), 1.0, 2.0, 0.42805746123038585, 0.03915795420723519, 0.5327845845623751, 0.4665773642295333], [Timestamp('2020-08-20 00:00:00'), 1.0, 2.0, 0.38148477472601505, 0.02513754535857821, 0.5933776799154041, 0.46882837701377006], [Timestamp('2020-08-21 00:00:00'), 2.0, 2.0, 0.3717363186856025, 0.033911110857626464, 0.5943525704567759, 0.4637358990490849], [Timestamp('2020-08-24 00:00:00'), 0.0, 2.0, 0.48928360309361996, 0.01701018382606103, 0.493706213080315, 0.46397747594898475], [Timestamp('2020-08-25 00:00:00'), 1.0, 2.0, 0.2305696838233679, 0.18459792894604757, 0.5848323872305888, 0.4634084647177159], [Timestamp('2020-08-26 00:00:00'), 1.0, 0.0, 0.5374587688499455, 0.03991125541340363, 0.42262997573665545, 0.47034135506479674], [Timestamp('2020-08-27 00:00:00'), 1.0, 2.0, 0.3478054461875736, 0.08545137832872093, 0.5667431754837073, 0.4668439846917208], [Timestamp('2020-08-28 00:00:00'), 0.0, 0.0, 0.4397205861039269, 0.2620686980765793, 0.29821071581949626, 0.466845436729296], [Timestamp('2020-08-31 00:00:00'), 1.0, 0.0, 0.5062708212840752, 0.0993441418447975, 0.3943850368711235, 0.4672659531945886], [Timestamp('2020-09-01 00:00:00'), 0.0, 2.0, 0.3701165355328861, 0.22165182271390832, 0.40823164175320253, 0.4669188382423677], [Timestamp('2020-09-02 00:00:00'), 0.0, 2.0, 0.4212421612940035, 0.08857213916545871, 0.49018569954053826, 0.4695679668624336], [Timestamp('2020-09-03 00:00:00'), 0.0, 2.0, 0.289202718031362, 0.09267147609812976, 0.6181258058705108, 0.46874446987569157], [Timestamp('2020-09-04 00:00:00'), 0.0, 2.0, 0.3896365942140142, 0.0869125265461821, 0.5234508792398093, 0.4681742814225896], [Timestamp('2020-09-08 00:00:00'), 0.0, 2.0, 0.3431993003967164, 0.1274808222877108, 0.5293198773155777, 0.4685878391760745], [Timestamp('2020-09-09 00:00:00'), 0.0, 2.0, 0.3382488341987039, 0.21121069171259466, 0.4505404740887039, 0.4679096467521515], [Timestamp('2020-09-10 00:00:00'), 0.0, 2.0, 0.2631597243885429, 0.025221392232344583, 0.711618883379112, 0.4680030438544371], [Timestamp('2020-09-11 00:00:00'), 0.0, 2.0, 0.1928776403553596, 0.2308409654656034, 0.5762813941790353, 0.46495495745195625], [Timestamp('2020-09-14 00:00:00'), 0.0, 2.0, 0.2575182768449036, 0.23330494097686763, 0.5091767821782252, 0.4636895632107671], [Timestamp('2020-09-15 00:00:00'), 0.0, 2.0, 0.2117979355520272, 0.24229263101708534, 0.5459094334308874, 0.4653202958829557], [Timestamp('2020-09-16 00:00:00'), 0.0, 2.0, 0.3508446308148763, 0.14742672936997928, 0.5017286398151387, 0.4632537548212489], [Timestamp('2020-09-17 00:00:00'), 0.0, 2.0, 0.21572880336790679, 0.3850961540748458, 0.3991750425572539, 0.46353316746603745], [Timestamp('2020-09-18 00:00:00'), 0.0, 2.0, 0.3215110989302801, 0.16156343846961474, 0.5169254626000991, 0.46256513751442757], [Timestamp('2020-09-21 00:00:00'), 0.0, 2.0, 0.2160131849103752, 0.16518414339166979, 0.6188026716979588, 0.46229245656179757], [Timestamp('2020-09-22 00:00:00'), 0.0, 2.0, 0.32202059068103206, 0.02635037570727398, 0.6516290336116908, 0.46310132133661547], [Timestamp('2020-09-23 00:00:00'), 0.0, 2.0, 0.35898803006447255, 0.1210287631754433, 0.5199832067600857, 0.46119312785979455], [Timestamp('2020-09-24 00:00:00'), 1.0, 2.0, 0.406201521338078, 0.07177183777818558, 0.5220266408837415, 0.46239973262032086], [Timestamp('2020-09-25 00:00:00'), 1.0, 0.0, 0.4689228778956245, 
0.06253829480319303, 0.4685388273011828, 0.46081087503271956], [Timestamp('2020-09-28 00:00:00'), 2.0, 2.0, 0.43277567480334306, 0.058821868653608375, 0.5084024565430548, 0.4617003367003367], [Timestamp('2020-09-29 00:00:00'), 2.0, 2.0, 0.3715860945873814, 0.07662657655423148, 0.5517873288583938, 0.4608191210638193], [Timestamp('2020-09-30 00:00:00'), 1.0, 2.0, 0.3776735143424287, 0.2294417779599909, 0.39288470769757977, 0.4604277711973152], [Timestamp('2020-10-01 00:00:00'), 0.0, 2.0, 0.3825309927309853, 0.20972569622494314, 0.4077433110440669, 0.4585233196371317], [Timestamp('2020-10-02 00:00:00'), 2.0, 2.0, 0.37985505848116885, 0.03456698533120302, 0.5855779561876245, 0.46244831858155466], [Timestamp('2020-10-05 00:00:00'), 1.0, 2.0, 0.41306339581470736, 0.09479514692021394, 0.4921414572650843, 0.4626848942372135], [Timestamp('2020-10-06 00:00:00'), 1.0, 2.0, 0.37383681506112143, 0.1063866131817944, 0.519776571757081, 0.4640642942204813], [Timestamp('2020-10-07 00:00:00'), 2.0, 0.0, 0.5275235069703167, 0.10505353932692317, 0.3674229537027582, 0.4607679301370527], [Timestamp('2020-10-08 00:00:00'), 1.0, 2.0, 0.3896511959216904, 0.1491664308660381, 0.4611823732122737, 0.46046173226987924], [Timestamp('2020-10-09 00:00:00'), 2.0, 2.0, 0.2944894345608172, 0.09233827503002497, 0.6131722904091595, 0.4558656710173124], [Timestamp('2020-10-13 00:00:00'), 1.0, 2.0, 0.2947528647459512, 0.26184573133461075, 0.4434014039194354, 0.4561033452825393], [Timestamp('2020-10-14 00:00:00'), 0.0, 2.0, 0.13865543803406344, 0.3731614448612568, 0.4881831171046816, 0.4605988020018124], [Timestamp('2020-10-15 00:00:00'), 0.0, 0.0, 0.5182556207386957, 0.09653016032944309, 0.38521421893185936, 0.458037854916362], [Timestamp('2020-10-16 00:00:00'), 1.0, 2.0, 0.3663472888330537, 0.08276825888703683, 0.5508844522799126, 0.45789816046401616], [Timestamp('2020-10-19 00:00:00'), 1.0, 2.0, 0.34337561447848897, 0.14516501744550478, 0.5114593680760107, 0.4579022149450336], [Timestamp('2020-10-20 00:00:00'), 0.0, 0.0, 0.440111477583482, 0.20048298542357612, 0.3594055369929399, 0.45818270280630374], [Timestamp('2020-10-21 00:00:00'), 1.0, 2.0, 0.19753703850034932, 0.30831932622937497, 0.49414363527027305, 0.4580459991566809], [Timestamp('2020-10-22 00:00:00'), 0.0, 1.0, 0.2560409966222471, 0.4015266870854127, 0.34243231629234083, 0.4565888489344683], [Timestamp('2020-10-23 00:00:00'), 2.0, 1.0, 0.1998864642194693, 0.5360218376827008, 0.26409169809783384, 0.4583172981614205], [Timestamp('2020-10-26 00:00:00'), 2.0, 1.0, 0.25789241463203716, 0.4407982011798765, 0.30130938418808684, 0.45641401320299096], [Timestamp('2020-10-27 00:00:00'), 2.0, 2.0, 0.31014349626806104, 0.13448121807304672, 0.5553752856588969, 0.4577168280841188], [Timestamp('2020-10-28 00:00:00'), 2.0, 2.0, 0.27509018242521405, 0.03820824633859503, 0.6867015712361939, 0.4572868229177527], [Timestamp('2020-10-29 00:00:00'), 2.0, 2.0, 0.3151910068920948, 0.1148044894113221, 0.5700045036965862, 0.45806167444061735], [Timestamp('2020-10-30 00:00:00'), 2.0, 2.0, 0.2436395252846876, 0.10169942522602796, 0.6546610494892914, 0.4582981287988854], [Timestamp('2020-11-03 00:00:00'), 2.0, 0.0, 0.5064229396567108, 0.07337225344543964, 0.4202048068978452, 0.4601363881767055], [Timestamp('2020-11-04 00:00:00'), 2.0, 2.0, 0.23257700808061213, 0.18685162055710236, 0.5805713713622855, 0.4603713280183868], [Timestamp('2020-11-05 00:00:00'), 2.0, 1.0, 0.19542616271286853, 0.5473205785014802, 0.25725325878564625, 0.4595388493859082], [Timestamp('2020-11-06 00:00:00'), 2.0, 1.0, 
0.10069333140379194, 0.5284101905956355, 0.37089647800057685, 0.45830268496355697], [Timestamp('2020-11-09 00:00:00'), 2.0, 2.0, 0.27236619822046537, 0.18917973502368524, 0.5384540667558473, 0.4585353801685071], [Timestamp('2020-11-10 00:00:00'), 2.0, 1.0, 0.1539868017871156, 0.511082358557581, 0.3349308396552969, 0.4566237772260627], [Timestamp('2020-11-11 00:00:00'), 2.0, 2.0, 0.0811537542069218, 0.42018943518977125, 0.4986568106033014, 0.4543170351993881], [Timestamp('2020-11-12 00:00:00'), 2.0, 2.0, 0.28994246045473354, 0.0826610113521581, 0.6273965281931085, 0.45361212861212863], [Timestamp('2020-11-13 00:00:00'), 2.0, 2.0, 0.14117165794536754, 0.07584056037125605, 0.7829877816833718, 0.4548967276780272], [Timestamp('2020-11-16 00:00:00'), 2.0, 2.0, 0.21742992370496897, 0.16995602978979818, 0.6126140465052347, 0.4561975471785598], [Timestamp('2020-11-17 00:00:00'), 2.0, 2.0, 0.12106438502967078, 0.3288184999217699, 0.5501171150485553, 0.4558739706843972], [Timestamp('2020-11-18 00:00:00'), 2.0, 2.0, 0.16682976066719946, 0.29635629508670275, 0.5368139442460992, 0.4555463595290094], [Timestamp('2020-11-19 00:00:00'), 2.0, 2.0, 0.20067758412488265, 0.18322222023376358, 0.6161001956413527, 0.4573699922125119], [Timestamp('2020-11-23 00:00:00'), 2.0, 2.0, 0.18824752888987642, 0.33013918399879016, 0.4816132871113376, 0.4559878015066694], [Timestamp('2020-11-24 00:00:00'), 2.0, 2.0, 0.27687543228070505, 0.1550558622030446, 0.5680687055162527, 0.45714118980925705], [Timestamp('2020-11-25 00:00:00'), 2.0, 1.0, 0.11399102506640885, 0.6040077205747606, 0.2820012543588318, 0.45683771659937983], [Timestamp('2020-11-26 00:00:00'), 2.0, 2.0, 0.25303486990734986, 0.1827614356367047, 0.5642036944559397, 0.4549326291248412], [Timestamp('2020-11-27 00:00:00'), 2.0, 0.0, 0.5537463973682069, 0.06187179005365578, 0.38438181257814047, 0.4556727178050708], [Timestamp('2020-11-30 00:00:00'), 2.0, 2.0, 0.3327792011641899, 0.22745056258852037, 0.4397702362472842, 0.4571218563028235], [Timestamp('2020-12-01 00:00:00'), 2.0, 1.0, 0.31206702592074165, 0.35665459068761995, 0.3312783833916375, 0.4577101209522869], [Timestamp('2020-12-02 00:00:00'), 2.0, 1.0, 0.16774859466376513, 0.5019724593547337, 0.3302789459815002, 0.457511222577319], [Timestamp('2020-12-03 00:00:00'), 2.0, 1.0, 0.24583170814396646, 0.4358186882516403, 0.3183496036043922, 0.4566909539735627], [Timestamp('2020-12-04 00:00:00'), 2.0, 2.0, 0.3445259901885213, 0.1767871886207453, 0.4786868211907294, 0.457840527385671], [Timestamp('2020-12-07 00:00:00'), 1.0, 2.0, 0.17160839664296876, 0.15226616273189086, 0.6761254406251368, 0.45603545061749395], [Timestamp('2020-12-08 00:00:00'), 2.0, 2.0, 0.3008721103121284, 0.10781762784531913, 0.5913102618425463, 0.45795415454858174], [Timestamp('2020-12-09 00:00:00'), 2.0, 2.0, 0.3162250604629965, 0.18071718114477006, 0.503057758392227, 0.45681567480678403], [Timestamp('2020-12-10 00:00:00'), 1.0, 2.0, 0.1878032227415599, 0.36296071580119765, 0.44923606145723927, 0.457027955608967], [Timestamp('2020-12-11 00:00:00'), 2.0, 2.0, 0.15764818381774773, 0.36693943232593007, 0.4754123838563185, 0.4535039941902687], [Timestamp('2020-12-14 00:00:00'), 2.0, 2.0, 0.1838790795490667, 0.27897649668675173, 0.537144423764176, 0.4541357169472822], [Timestamp('2020-12-15 00:00:00'), 2.0, 2.0, 0.16218756039372415, 0.31998296935573534, 0.5178294702505458, 0.4532730015082956], [Timestamp('2020-12-16 00:00:00'), 2.0, 2.0, 0.1532494244014917, 0.25853801701047896, 0.5882125585880242, 0.45348332881539716], [Timestamp('2020-12-17 
00:00:00'), 2.0, 2.0, 0.20990569678674195, 0.39067909170630777, 0.39941521150695386, 0.4527593041306502], [Timestamp('2020-12-18 00:00:00'), 2.0, 2.0, 0.11468058989968624, 0.42951549264358224, 0.4558039174567293, 0.4534788085729442], [Timestamp('2020-12-21 00:00:00'), 2.0, 2.0, 0.39662356041834723, 0.036206051413632624, 0.5671703881680226, 0.45722680509512675], [Timestamp('2020-12-22 00:00:00'), 2.0, 0.0, 0.4980202962861106, 0.06509983740669913, 0.4368798663071836, 0.4539775197610851], [Timestamp('2020-12-23 00:00:00'), 2.0, 0.0, 0.5240375387716928, 0.13547703236347577, 0.3404854288648266, 0.45375674660107945], [Timestamp('2020-12-28 00:00:00'), 2.0, 0.0, 0.40724825974448464, 0.2044003278405757, 0.3883514124149373, 0.45237285172821035], [Timestamp('2020-12-29 00:00:00'), 2.0, 1.0, 0.22377827091341015, 0.3948236199668255, 0.3813981091197711, 0.45350934700121187], [Timestamp('2020-12-30 00:00:00'), 1.0, 0.0, 0.5651380406490937, 0.17632190409695772, 0.25854005525395257, 0.45328363009341316], [Timestamp('2021-01-04 00:00:00'), 0.0, 0.0, 0.37616371984563524, 0.30794134864625006, 0.3158949315081115, 0.4544695261856753], [Timestamp('2021-01-05 00:00:00'), 0.0, 0.0, 0.40126632594323086, 0.354680410302762, 0.24405326375400765, 0.4543731840682665], [Timestamp('2021-01-06 00:00:00'), 0.0, 2.0, 0.23898173912899257, 0.3773768429404478, 0.3836414179305558, 0.45815483854502226], [Timestamp('2021-01-07 00:00:00'), 0.0, 1.0, 0.25024236289154134, 0.43468093394790464, 0.31507670316055464, 0.45670275972525537], [Timestamp('2021-01-08 00:00:00'), 0.0, 0.0, 0.5858068157598595, 0.05773582233156281, 0.356457361908572, 0.4556762698033729], [Timestamp('2021-01-11 00:00:00'), 0.0, 0.0, 0.4861500254706843, 0.17314857586305046, 0.34070139866625854, 0.4562324353590153], [Timestamp('2021-01-12 00:00:00'), 0.0, 0.0, 0.5525703595487581, 0.1814676045928561, 0.2659620358583874, 0.4570482222474097], [Timestamp('2021-01-13 00:00:00'), 0.0, 0.0, 0.47704496492521176, 0.1386340550802195, 0.38432097999457354, 0.45836256447084284], [Timestamp('2021-01-14 00:00:00'), 0.0, 0.0, 0.6274775193404423, 0.09719505925031335, 0.27532742140924277, 0.45632577011796144], [Timestamp('2021-01-15 00:00:00'), 0.0, 0.0, 0.4833727362728624, 0.2356800089123891, 0.2809472548147437, 0.4590473217229276], [Timestamp('2021-01-18 00:00:00'), 2.0, 0.0, 0.5820723915748809, 0.1469525773530091, 0.2709750310721065, 0.4563372216443489], [Timestamp('2021-01-19 00:00:00'), 1.0, 0.0, 0.6553286612508452, 0.0712191223874451, 0.27345221636170647, 0.4557377173206816], [Timestamp('2021-01-20 00:00:00'), 2.0, 0.0, 0.528003860703193, 0.0773403050836457, 0.3946558342131665, 0.46063932211945163], [Timestamp('2021-01-21 00:00:00'), 2.0, 0.0, 0.7170569586713936, 0.05597811102713356, 0.2269649303014766, 0.4586664077697289], [Timestamp('2021-01-22 00:00:00'), 2.0, 1.0, 0.28091060544918617, 0.4017155312161287, 0.31737386333467993, 0.4598791328513517], [Timestamp('2021-01-26 00:00:00'), 2.0, 0.0, 0.5882558089425494, 0.0865601852645146, 0.32518400579293466, 0.457763373584275], [Timestamp('2021-01-27 00:00:00'), 2.0, 0.0, 0.48252990879051766, 0.287993709529548, 0.22947638167993087, 0.4557969681409304], [Timestamp('2021-01-28 00:00:00'), 1.0, 0.0, 0.4971628162043124, 0.05677095982626362, 0.446066223969423, 0.4550940355361181], [Timestamp('2021-01-29 00:00:00'), 2.0, 0.0, 0.48119403974510666, 0.07599256881678537, 0.44281339143811116, 0.4517947720101702], [Timestamp('2021-02-01 00:00:00'), 2.0, 2.0, 0.2705408125284368, 0.141254008478771, 0.588205178992793, 0.4515924759237497], 
[Timestamp('2021-02-02 00:00:00'), 0.0, 2.0, 0.32012588869458825, 0.0693654104177178, 0.6105087008876874, 0.45238756323824764], [Timestamp('2021-02-03 00:00:00'), 0.0, 2.0, 0.3710999032226685, 0.03736230943724842, 0.5915377873400812, 0.4534465455065894], [Timestamp('2021-02-04 00:00:00'), 0.0, 2.0, 0.26301499011007445, 0.26869110392619716, 0.4682939059637317, 0.4531850644012701], [Timestamp('2021-02-05 00:00:00'), 0.0, 2.0, 0.1996748786195044, 0.16473702947189517, 0.6355880919086049, 0.45317943109399494], [Timestamp('2021-02-08 00:00:00'), 0.0, 2.0, 0.25665862656893995, 0.13500994413788756, 0.6083314292931654, 0.4516772718508948], [Timestamp('2021-02-09 00:00:00'), 0.0, 2.0, 0.3218555414386054, 0.05106966145490258, 0.6270747971064959, 0.45257312279798306], [Timestamp('2021-02-10 00:00:00'), 0.0, 1.0, 0.2067822215924724, 0.5047716801805097, 0.2884460982270232, 0.4535325447321777], [Timestamp('2021-02-11 00:00:00'), 0.0, 1.0, 0.30659215595732, 0.3637918937930867, 0.3296159502495941, 0.452641633791324], [Timestamp('2021-02-12 00:00:00'), 0.0, 2.0, 0.29231671375062757, 0.16151727642690733, 0.546166009822464, 0.4522494996633139], [Timestamp('2021-02-18 00:00:00'), 0.0, 2.0, 0.402670421585294, 0.14406688051799355, 0.45326269789670576, 0.45331562692332783], [Timestamp('2021-02-19 00:00:00'), 0.0, 2.0, 0.36211922681991315, 0.07388100976627701, 0.5639997634138123, 0.4521386832766354], [Timestamp('2021-02-22 00:00:00'), 1.0, 2.0, 0.45491810170373176, 0.04235930334383117, 0.5027225949524403, 0.4518261586100906], [Timestamp('2021-02-23 00:00:00'), 0.0, 0.0, 0.43356751190266013, 0.1464354184838686, 0.4199970696134742, 0.4539438607848328], [Timestamp('2021-02-24 00:00:00'), 0.0, 0.0, 0.442655821595577, 0.2441994093772861, 0.3131447690271333, 0.45345059204178395], [Timestamp('2021-02-25 00:00:00'), 1.0, 0.0, 0.5482559556731441, 0.01800783936037036, 0.43373620496649135, 0.45433686255740796], [Timestamp('2021-02-26 00:00:00'), 2.0, 2.0, 0.2919468859140516, 0.0883547699995808, 0.6196983440863653, 0.45333765185950475], [Timestamp('2021-03-01 00:00:00'), 2.0, 2.0, 0.43971654749436484, 0.0306955737705111, 0.5295878787351288, 0.45254313893632564], [Timestamp('2021-03-02 00:00:00'), 2.0, 2.0, 0.2871811648313717, 0.1284074596575562, 0.5844113755110787, 0.45099312748098913], [Timestamp('2021-03-03 00:00:00'), 2.0, 2.0, 0.37078503739962915, 0.09483344705045721, 0.5343815155499092, 0.4518122362483787], [Timestamp('2021-03-04 00:00:00'), 2.0, 2.0, 0.43039231443440545, 0.07691296592800895, 0.4926947196375857, 0.45151639800294413], [Timestamp('2021-03-05 00:00:00'), 2.0, 2.0, 0.32282696627067475, 0.17008343680503768, 0.5070895969242815, 0.4527718278253774], [Timestamp('2021-03-08 00:00:00'), 2.0, 2.0, 0.24941260838935356, 0.058346443753811476, 0.6922409478568289, 0.4524777465970466], [Timestamp('2021-03-09 00:00:00'), 2.0, 2.0, 0.2509294613292612, 0.15611477271497515, 0.5929557659557605, 0.4541627773802735], [Timestamp('2021-03-10 00:00:00'), 2.0, 2.0, 0.26798363137929293, 0.18238396100711657, 0.5496324076135869, 0.45573235414116686], [Timestamp('2021-03-11 00:00:00'), 1.0, 2.0, 0.22363826216763807, 0.13610430877998886, 0.6402574290523758, 0.4550523998174878], [Timestamp('2021-03-12 00:00:00'), 1.0, 2.0, 0.2528523196611148, 0.10859181229863167, 0.638555868040259, 0.4500997727718375], [Timestamp('2021-03-15 00:00:00'), 1.0, 2.0, 0.15952101272121053, 0.18974298733533868, 0.6507359999434505, 0.45539557521404195], [Timestamp('2021-03-16 00:00:00'), 2.0, 2.0, 0.25023282802517066, 0.112844149839217, 0.636923022135614, 
0.4520203867249144], [Timestamp('2021-03-17 00:00:00'), 1.0, 2.0, 0.12069051591755993, 0.1582955862695514, 0.721013897812892, 0.45183076129742733], [Timestamp('2021-03-18 00:00:00'), 2.0, 2.0, 0.2301163377020472, 0.12057267557315242, 0.649310986724807, 0.4482540804977783], [Timestamp('2021-03-19 00:00:00'), 1.0, 2.0, 0.1757824238531343, 0.27449580895541487, 0.5497217671914514, 0.449325170727765], [Timestamp('2021-03-22 00:00:00'), 2.0, 2.0, 0.11967626020040219, 0.20083395805934817, 0.6794897817402491, 0.44809153481719854], [Timestamp('2021-03-23 00:00:00'), 2.0, 2.0, 0.34599595012248235, 0.028776334197576463, 0.625227715679944, 0.4487793992948632], [Timestamp('2021-03-24 00:00:00'), 2.0, 2.0, 0.19512412888492786, 0.17657570655684393, 0.6283001645582295, 0.45044563279857397], [Timestamp('2021-03-25 00:00:00'), 2.0, 2.0, 0.14937050319908082, 0.24091352894508936, 0.6097159678558367, 0.45014906777021313], [Timestamp('2021-03-26 00:00:00'), 2.0, 2.0, 0.23225175469862636, 0.3117668515397049, 0.4559813937616677, 0.44946493800483617], [Timestamp('2021-03-29 00:00:00'), 1.0, 2.0, 0.3010233614575346, 0.1325669308194286, 0.56640970772304, 0.44916939293528646], [Timestamp('2021-03-30 00:00:00'), 2.0, 2.0, 0.23627597544838103, 0.2638516884261884, 0.4998723361254356, 0.4476524134647783], [Timestamp('2021-03-31 00:00:00'), 1.0, 2.0, 0.15702239317295297, 0.3004277615405109, 0.5425498452865367, 0.4460921030580473], [Timestamp('2021-04-01 00:00:00'), 1.0, 2.0, 0.32350859352907635, 0.13394803564460472, 0.5425433708263169, 0.45068752963489805], [Timestamp('2021-04-05 00:00:00'), 2.0, 2.0, 0.23949428885735538, 0.24479859449198998, 0.5157071166506514, 0.44751177894911676], [Timestamp('2021-04-06 00:00:00'), 2.0, 2.0, 0.19930804304446717, 0.38593695471389083, 0.4147550022416447, 0.4506962473448195], [Timestamp('2021-04-07 00:00:00'), 2.0, 1.0, 0.08228608893344488, 0.5014682602529692, 0.41624565081358683, 0.4492641758142955], [Timestamp('2021-04-08 00:00:00'), 2.0, 1.0, 0.15161817578398504, 0.48922882952022834, 0.3591529946957845, 0.4498492577747592], [Timestamp('2021-04-09 00:00:00'), 2.0, 2.0, 0.2032597482776377, 0.19957920016990552, 0.5971610515524539, 0.44946619664710846], [Timestamp('2021-04-12 00:00:00'), 1.0, 2.0, 0.1345004722036762, 0.25086496171166217, 0.6146345660846659, 0.44887417946742586], [Timestamp('2021-04-13 00:00:00'), 2.0, 2.0, 0.15749391338351204, 0.37867243088921865, 0.4638336557272711, 0.45105961565743097], [Timestamp('2021-04-14 00:00:00'), 1.0, 2.0, 0.2635022748795886, 0.23232761058209575, 0.5041701145383138, 0.4517362246195198], [Timestamp('2021-04-15 00:00:00'), 2.0, 2.0, 0.279880258082506, 0.23635191272347697, 0.4837678291940199, 0.4474896177642173], [Timestamp('2021-04-16 00:00:00'), 2.0, 2.0, 0.2312248024617855, 0.18496858662888582, 0.5838066109093268, 0.4481656666187095], [Timestamp('2021-04-19 00:00:00'), 0.0, 0.0, 0.38538131067558606, 0.30244155170983006, 0.3121771376145813, 0.44883976157477834], [Timestamp('2021-04-20 00:00:00'), 1.0, 0.0, 0.378139980111356, 0.28327893969702117, 0.3385810801916294, 0.4502424576504045], [Timestamp('2021-04-22 00:00:00'), 1.0, 0.0, 0.42049447683008184, 0.23406289199368047, 0.3454426311762401, 0.44954116687380924], [Timestamp('2021-04-23 00:00:00'), 2.0, 1.0, 0.28188769781966144, 0.4335371510969542, 0.28457515108337966, 0.44971748123549943], [Timestamp('2021-04-26 00:00:00'), 2.0, 2.0, 0.278989130397092, 0.3342489753316007, 0.38676189427131197, 0.450782825821849], [Timestamp('2021-04-27 00:00:00'), 2.0, 2.0, 0.30209775581980014, 0.30888312661850187, 
0.3890191175616949, 0.4500978402722194], [Timestamp('2021-04-28 00:00:00'), 2.0, 0.0, 0.42230934189988256, 0.2825169861291104, 0.29517367197100464, 0.4502871988717951], [Timestamp('2021-04-29 00:00:00'), 2.0, 0.0, 0.4637073489022653, 0.13997855388808347, 0.39631409720965366, 0.4496011226987937], [Timestamp('2021-04-30 00:00:00'), 2.0, 0.0, 0.5029175664984263, 0.11457787282175409, 0.38250456067982064, 0.44978939537259555], [Timestamp('2021-05-03 00:00:00'), 2.0, 1.0, 0.2823039108289623, 0.3694729517342843, 0.3482231374367598, 0.4495797928632941], [Timestamp('2021-05-04 00:00:00'), 2.0, 2.0, 0.2784842567200383, 0.22222628226474025, 0.4992894610152244, 0.45111806808875965], [Timestamp('2021-05-05 00:00:00'), 2.0, 0.0, 0.4455727162422362, 0.17966996577324842, 0.37475731798451806, 0.45082717633864045], [Timestamp('2021-05-06 00:00:00'), 2.0, 2.0, 0.2787287692402665, 0.23770470391712142, 0.48356652684261664, 0.451488136871213], [Timestamp('2021-05-07 00:00:00'), 2.0, 0.0, 0.4463025228082205, 0.2711963516282361, 0.28250112556353946, 0.45119754670901085], [Timestamp('2021-05-10 00:00:00'), 2.0, 1.0, 0.19451524807363296, 0.43842662534401383, 0.3670581265823588, 0.4505070509915777], [Timestamp('2021-05-11 00:00:00'), 2.0, 2.0, 0.20681009474324222, 0.2211863162063896, 0.5720035890503694, 0.4496959781308874], [Timestamp('2021-05-12 00:00:00'), 2.0, 2.0, 0.3629959542996642, 0.11141937133235648, 0.5255846743679822, 0.4513470847543439], [Timestamp('2021-05-13 00:00:00'), 0.0, 2.0, 0.30639934030531235, 0.33781067366305556, 0.3557899860316364, 0.44972746563198296], [Timestamp('2021-05-14 00:00:00'), 0.0, 1.0, 0.3924580873943391, 0.40093051715423883, 0.2066113954514152, 0.449019747615287], [Timestamp('2021-05-17 00:00:00'), 0.0, 1.0, 0.3725596232886352, 0.4440197091713069, 0.18342066754005604, 0.44840931655875654], [Timestamp('2021-05-18 00:00:00'), 0.0, 0.0, 0.3701967975100598, 0.3612570116475866, 0.2685461908423483, 0.44928489060968224], [Timestamp('2021-05-19 00:00:00'), 0.0, 2.0, 0.2890753839323057, 0.28512263717741937, 0.4258019788902787, 0.4509456835859851], [Timestamp('2021-05-20 00:00:00'), 0.0, 0.0, 0.45964767404309204, 0.2994524225213663, 0.2408999034355449, 0.45000475150266145], [Timestamp('2021-05-21 00:00:00'), 0.0, 0.0, 0.6166937194860372, 0.12100836934750449, 0.2622979111664556, 0.44673648687526546], [Timestamp('2021-05-24 00:00:00'), 0.0, 0.0, 0.5304445796412803, 0.2089140339901368, 0.26064138636858386, 0.4479929579231534], [Timestamp('2021-05-25 00:00:00'), 0.0, 0.0, 0.5896019861935606, 0.07914926565931457, 0.3312487481471284, 0.44705702937575115]]\n"
],
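[
"# Added sanity check (sketch): from the printed output above, each res* row looks like\n# [date, true_class, predicted_class, p_sell, p_hold, p_buy, accuracy], and the three\n# probability columns sum to ~1. Assumes res1 from the earlier cells is in scope.\nrow = res1[0]\nprint(len(row), row[0], row[3] + row[4] + row[5])",
"_____no_output_____"
],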
[
"import statistics\ntrade_instruction = []\nfor i in range(len(res1)):\n #vend = res1[i][3]#*res2[i][3]#*res3[i][3]*res5[i][3]*res10[i][3]\n #mant = res1[i][4]#*res2[i][4]#*res3[i][4]*res5[i][4]*res10[i][4]\n #comp = res1[i][5]#*res2[i][5]#*res3[i][5]*res5[i][5]*res10[i][5]\n criterion_1 = [1 if res1[i][3]>res1[i][4]+res1[i][5] else (3 if res1[i][3]+res1[i][4]<res1[i][5] else 2)]\n criterion_2 = [1 if res2[i][3]>res2[i][4]+res2[i][5] else (3 if res2[i][3]+res2[i][4]<res2[i][5] else 2)]\n criterion_3 = [1 if res3[i][3]>res3[i][4]+res3[i][5] else (3 if res3[i][3]+res3[i][4]<res3[i][5] else 2)]\n criterion_5 = [1 if res5[i][3]>res5[i][4]+res5[i][5] else (3 if res5[i][3]+res5[i][4]<res5[i][5] else 2)]\n criterion_10 = [1 if res10[i][3]>res10[i][4]+res10[i][5] else (3 if res10[i][3]+res10[i][4]<res10[i][5] else 2)]\n #print(res1[i][6],res2[i][6],res3[i][6],res5[i][6],res10[i][6])\n criteria = [criterion_1, criterion_2, criterion_3, criterion_5, criterion_10]\n criteria_acc = [res1[i][6],res2[i][6],res3[i][6],res5[i][6],res10[i][6]]\n max_acc = max(criteria_acc)\n max_index = criteria_acc.index(max_acc)\n max_acc_criteria = criteria[max_index]\n media = max_acc_criteria[0]\n #media = sp.stats.mode(criteria_10)[0][0]\n #media = 1 if vend>mant+comp else (3 if vend+mant<comp else 2)\n #print(media)\n if media>2:\n trade_instruction.append([res1[i][0],\"C\"])\n elif media==2:\n trade_instruction.append([res1[i][0],\"_\"])\n elif media<2:\n trade_instruction.append([res1[i][0],\"V\"])\nprint(trade_instruction)",
"[[Timestamp('2018-01-26 00:00:00'), '_'], [Timestamp('2018-01-29 00:00:00'), '_'], [Timestamp('2018-01-30 00:00:00'), '_'], [Timestamp('2018-01-31 00:00:00'), 'V'], [Timestamp('2018-02-01 00:00:00'), '_'], [Timestamp('2018-02-02 00:00:00'), '_'], [Timestamp('2018-02-05 00:00:00'), '_'], [Timestamp('2018-02-06 00:00:00'), '_'], [Timestamp('2018-02-07 00:00:00'), '_'], [Timestamp('2018-02-08 00:00:00'), '_'], [Timestamp('2018-02-09 00:00:00'), '_'], [Timestamp('2018-02-15 00:00:00'), 'V'], [Timestamp('2018-02-16 00:00:00'), '_'], [Timestamp('2018-02-19 00:00:00'), '_'], [Timestamp('2018-02-20 00:00:00'), '_'], [Timestamp('2018-02-21 00:00:00'), '_'], [Timestamp('2018-02-22 00:00:00'), '_'], [Timestamp('2018-02-23 00:00:00'), '_'], [Timestamp('2018-02-26 00:00:00'), '_'], [Timestamp('2018-02-27 00:00:00'), '_'], [Timestamp('2018-02-28 00:00:00'), '_'], [Timestamp('2018-03-01 00:00:00'), '_'], [Timestamp('2018-03-02 00:00:00'), '_'], [Timestamp('2018-03-05 00:00:00'), '_'], [Timestamp('2018-03-06 00:00:00'), '_'], [Timestamp('2018-03-07 00:00:00'), '_'], [Timestamp('2018-03-08 00:00:00'), '_'], [Timestamp('2018-03-09 00:00:00'), '_'], [Timestamp('2018-03-12 00:00:00'), '_'], [Timestamp('2018-03-13 00:00:00'), '_'], [Timestamp('2018-03-14 00:00:00'), 'V'], [Timestamp('2018-03-15 00:00:00'), '_'], [Timestamp('2018-03-16 00:00:00'), '_'], [Timestamp('2018-03-19 00:00:00'), '_'], [Timestamp('2018-03-20 00:00:00'), '_'], [Timestamp('2018-03-21 00:00:00'), 'V'], [Timestamp('2018-03-22 00:00:00'), '_'], [Timestamp('2018-03-23 00:00:00'), 'V'], [Timestamp('2018-03-26 00:00:00'), '_'], [Timestamp('2018-03-27 00:00:00'), '_'], [Timestamp('2018-03-28 00:00:00'), 'C'], [Timestamp('2018-03-29 00:00:00'), '_'], [Timestamp('2018-04-02 00:00:00'), 'C'], [Timestamp('2018-04-03 00:00:00'), 'C'], [Timestamp('2018-04-04 00:00:00'), 'C'], [Timestamp('2018-04-05 00:00:00'), 'V'], [Timestamp('2018-04-06 00:00:00'), '_'], [Timestamp('2018-04-09 00:00:00'), '_'], [Timestamp('2018-04-10 00:00:00'), 'V'], [Timestamp('2018-04-11 00:00:00'), 'V'], [Timestamp('2018-04-12 00:00:00'), '_'], [Timestamp('2018-04-13 00:00:00'), 'V'], [Timestamp('2018-04-16 00:00:00'), '_'], [Timestamp('2018-04-17 00:00:00'), 'V'], [Timestamp('2018-04-18 00:00:00'), 'V'], [Timestamp('2018-04-19 00:00:00'), 'V'], [Timestamp('2018-04-20 00:00:00'), 'V'], [Timestamp('2018-04-23 00:00:00'), '_'], [Timestamp('2018-04-24 00:00:00'), '_'], [Timestamp('2018-04-25 00:00:00'), 'C'], [Timestamp('2018-04-26 00:00:00'), 'V'], [Timestamp('2018-04-27 00:00:00'), 'V'], [Timestamp('2018-04-30 00:00:00'), '_'], [Timestamp('2018-05-02 00:00:00'), 'C'], [Timestamp('2018-05-03 00:00:00'), '_'], [Timestamp('2018-05-04 00:00:00'), 'C'], [Timestamp('2018-05-07 00:00:00'), 'V'], [Timestamp('2018-05-08 00:00:00'), 'C'], [Timestamp('2018-05-09 00:00:00'), '_'], [Timestamp('2018-05-10 00:00:00'), 'V'], [Timestamp('2018-05-11 00:00:00'), 'C'], [Timestamp('2018-05-14 00:00:00'), '_'], [Timestamp('2018-05-15 00:00:00'), '_'], [Timestamp('2018-05-16 00:00:00'), '_'], [Timestamp('2018-05-17 00:00:00'), 'C'], [Timestamp('2018-05-18 00:00:00'), 'C'], [Timestamp('2018-05-21 00:00:00'), 'C'], [Timestamp('2018-05-22 00:00:00'), 'C'], [Timestamp('2018-05-23 00:00:00'), '_'], [Timestamp('2018-05-24 00:00:00'), 'C'], [Timestamp('2018-05-25 00:00:00'), 'C'], [Timestamp('2018-05-28 00:00:00'), 'C'], [Timestamp('2018-05-29 00:00:00'), 'V'], [Timestamp('2018-05-30 00:00:00'), 'V'], [Timestamp('2018-06-01 00:00:00'), '_'], [Timestamp('2018-06-04 00:00:00'), 'C'], [Timestamp('2018-06-05 
00:00:00'), 'C'], [Timestamp('2018-06-06 00:00:00'), 'V'], [Timestamp('2018-06-07 00:00:00'), '_'], [Timestamp('2018-06-08 00:00:00'), 'V'], [Timestamp('2018-06-11 00:00:00'), 'C'], [Timestamp('2018-06-12 00:00:00'), 'V'], [Timestamp('2018-06-13 00:00:00'), 'V'], [Timestamp('2018-06-14 00:00:00'), 'V'], [Timestamp('2018-06-15 00:00:00'), 'V'], [Timestamp('2018-06-18 00:00:00'), '_'], [Timestamp('2018-06-19 00:00:00'), '_'], [Timestamp('2018-06-20 00:00:00'), 'V'], [Timestamp('2018-06-21 00:00:00'), '_'], [Timestamp('2018-06-22 00:00:00'), 'C'], [Timestamp('2018-06-25 00:00:00'), 'V'], [Timestamp('2018-06-26 00:00:00'), 'V'], [Timestamp('2018-06-27 00:00:00'), '_'], [Timestamp('2018-06-28 00:00:00'), 'C'], [Timestamp('2018-06-29 00:00:00'), 'C'], [Timestamp('2018-07-02 00:00:00'), '_'], [Timestamp('2018-07-03 00:00:00'), 'C'], [Timestamp('2018-07-04 00:00:00'), 'V'], [Timestamp('2018-07-05 00:00:00'), 'V'], [Timestamp('2018-07-06 00:00:00'), 'C'], [Timestamp('2018-07-10 00:00:00'), 'C'], [Timestamp('2018-07-11 00:00:00'), 'C'], [Timestamp('2018-07-12 00:00:00'), 'C'], [Timestamp('2018-07-13 00:00:00'), 'V'], [Timestamp('2018-07-16 00:00:00'), 'C'], [Timestamp('2018-07-17 00:00:00'), 'V'], [Timestamp('2018-07-18 00:00:00'), 'C'], [Timestamp('2018-07-19 00:00:00'), 'C'], [Timestamp('2018-07-20 00:00:00'), 'V'], [Timestamp('2018-07-23 00:00:00'), 'V'], [Timestamp('2018-07-24 00:00:00'), 'C'], [Timestamp('2018-07-25 00:00:00'), 'V'], [Timestamp('2018-07-26 00:00:00'), 'C'], [Timestamp('2018-07-27 00:00:00'), 'V'], [Timestamp('2018-07-30 00:00:00'), 'V'], [Timestamp('2018-07-31 00:00:00'), 'C'], [Timestamp('2018-08-01 00:00:00'), 'C'], [Timestamp('2018-08-02 00:00:00'), 'C'], [Timestamp('2018-08-03 00:00:00'), 'V'], [Timestamp('2018-08-06 00:00:00'), 'C'], [Timestamp('2018-08-07 00:00:00'), 'C'], [Timestamp('2018-08-08 00:00:00'), 'C'], [Timestamp('2018-08-09 00:00:00'), 'V'], [Timestamp('2018-08-10 00:00:00'), 'C'], [Timestamp('2018-08-13 00:00:00'), 'V'], [Timestamp('2018-08-14 00:00:00'), 'C'], [Timestamp('2018-08-15 00:00:00'), 'C'], [Timestamp('2018-08-16 00:00:00'), 'V'], [Timestamp('2018-08-17 00:00:00'), 'C'], [Timestamp('2018-08-20 00:00:00'), 'C'], [Timestamp('2018-08-21 00:00:00'), 'C'], [Timestamp('2018-08-22 00:00:00'), 'V'], [Timestamp('2018-08-23 00:00:00'), 'V'], [Timestamp('2018-08-24 00:00:00'), 'V'], [Timestamp('2018-08-27 00:00:00'), 'V'], [Timestamp('2018-08-28 00:00:00'), 'C'], [Timestamp('2018-08-29 00:00:00'), 'V'], [Timestamp('2018-08-30 00:00:00'), 'V'], [Timestamp('2018-08-31 00:00:00'), 'C'], [Timestamp('2018-09-03 00:00:00'), 'C'], [Timestamp('2018-09-04 00:00:00'), 'C'], [Timestamp('2018-09-05 00:00:00'), 'V'], [Timestamp('2018-09-06 00:00:00'), 'V'], [Timestamp('2018-09-10 00:00:00'), 'V'], [Timestamp('2018-09-11 00:00:00'), 'C'], [Timestamp('2018-09-12 00:00:00'), 'V'], [Timestamp('2018-09-13 00:00:00'), 'C'], [Timestamp('2018-09-14 00:00:00'), 'V'], [Timestamp('2018-09-17 00:00:00'), 'V'], [Timestamp('2018-09-18 00:00:00'), 'V'], [Timestamp('2018-09-19 00:00:00'), 'C'], [Timestamp('2018-09-20 00:00:00'), 'C'], [Timestamp('2018-09-21 00:00:00'), 'C'], [Timestamp('2018-09-24 00:00:00'), 'C'], [Timestamp('2018-09-25 00:00:00'), 'V'], [Timestamp('2018-09-26 00:00:00'), 'V'], [Timestamp('2018-09-27 00:00:00'), 'V'], [Timestamp('2018-09-28 00:00:00'), 'C'], [Timestamp('2018-10-01 00:00:00'), 'C'], [Timestamp('2018-10-02 00:00:00'), 'V'], [Timestamp('2018-10-03 00:00:00'), 'V'], [Timestamp('2018-10-04 00:00:00'), 'V'], [Timestamp('2018-10-05 00:00:00'), 'C'], 
[Timestamp('2018-10-08 00:00:00'), 'V'], [Timestamp('2018-10-09 00:00:00'), 'V'], [Timestamp('2018-10-10 00:00:00'), 'C'], [Timestamp('2018-10-11 00:00:00'), 'C'], [Timestamp('2018-10-15 00:00:00'), 'C'], [Timestamp('2018-10-16 00:00:00'), '_'], [Timestamp('2018-10-17 00:00:00'), 'C'], [Timestamp('2018-10-18 00:00:00'), 'C'], [Timestamp('2018-10-19 00:00:00'), 'C'], [Timestamp('2018-10-22 00:00:00'), 'V'], [Timestamp('2018-10-23 00:00:00'), 'C'], [Timestamp('2018-10-24 00:00:00'), 'C'], [Timestamp('2018-10-25 00:00:00'), 'V'], [Timestamp('2018-10-26 00:00:00'), 'C'], [Timestamp('2018-10-29 00:00:00'), '_'], [Timestamp('2018-10-30 00:00:00'), 'V'], [Timestamp('2018-10-31 00:00:00'), 'C'], [Timestamp('2018-11-01 00:00:00'), 'C'], [Timestamp('2018-11-05 00:00:00'), 'V'], [Timestamp('2018-11-06 00:00:00'), 'C'], [Timestamp('2018-11-07 00:00:00'), 'C'], [Timestamp('2018-11-08 00:00:00'), 'V'], [Timestamp('2018-11-09 00:00:00'), 'C'], [Timestamp('2018-11-12 00:00:00'), 'V'], [Timestamp('2018-11-13 00:00:00'), 'V'], [Timestamp('2018-11-14 00:00:00'), 'V'], [Timestamp('2018-11-16 00:00:00'), 'V'], [Timestamp('2018-11-19 00:00:00'), 'V'], [Timestamp('2018-11-21 00:00:00'), 'V'], [Timestamp('2018-11-22 00:00:00'), 'V'], [Timestamp('2018-11-23 00:00:00'), 'C'], [Timestamp('2018-11-26 00:00:00'), 'C'], [Timestamp('2018-11-27 00:00:00'), 'V'], [Timestamp('2018-11-28 00:00:00'), 'C'], [Timestamp('2018-11-29 00:00:00'), 'C'], [Timestamp('2018-11-30 00:00:00'), 'V'], [Timestamp('2018-12-03 00:00:00'), 'V'], [Timestamp('2018-12-04 00:00:00'), 'V'], [Timestamp('2018-12-05 00:00:00'), 'V'], [Timestamp('2018-12-06 00:00:00'), 'C'], [Timestamp('2018-12-07 00:00:00'), 'C'], [Timestamp('2018-12-10 00:00:00'), '_'], [Timestamp('2018-12-11 00:00:00'), 'C'], [Timestamp('2018-12-12 00:00:00'), 'C'], [Timestamp('2018-12-13 00:00:00'), 'V'], [Timestamp('2018-12-14 00:00:00'), '_'], [Timestamp('2018-12-17 00:00:00'), 'C'], [Timestamp('2018-12-18 00:00:00'), 'C'], [Timestamp('2018-12-19 00:00:00'), 'V'], [Timestamp('2018-12-20 00:00:00'), 'C'], [Timestamp('2018-12-21 00:00:00'), 'C'], [Timestamp('2018-12-26 00:00:00'), 'V'], [Timestamp('2018-12-27 00:00:00'), 'C'], [Timestamp('2018-12-28 00:00:00'), 'V'], [Timestamp('2019-01-02 00:00:00'), 'V'], [Timestamp('2019-01-03 00:00:00'), 'C'], [Timestamp('2019-01-04 00:00:00'), 'V'], [Timestamp('2019-01-07 00:00:00'), 'V'], [Timestamp('2019-01-08 00:00:00'), 'C'], [Timestamp('2019-01-09 00:00:00'), 'V'], [Timestamp('2019-01-10 00:00:00'), 'C'], [Timestamp('2019-01-11 00:00:00'), 'C'], [Timestamp('2019-01-14 00:00:00'), 'C'], [Timestamp('2019-01-15 00:00:00'), 'C'], [Timestamp('2019-01-16 00:00:00'), 'C'], [Timestamp('2019-01-17 00:00:00'), 'V'], [Timestamp('2019-01-18 00:00:00'), 'V'], [Timestamp('2019-01-21 00:00:00'), 'C'], [Timestamp('2019-01-22 00:00:00'), 'C'], [Timestamp('2019-01-23 00:00:00'), 'V'], [Timestamp('2019-01-24 00:00:00'), 'C'], [Timestamp('2019-01-28 00:00:00'), 'C'], [Timestamp('2019-01-29 00:00:00'), 'V'], [Timestamp('2019-01-30 00:00:00'), 'V'], [Timestamp('2019-01-31 00:00:00'), 'C'], [Timestamp('2019-02-01 00:00:00'), 'V'], [Timestamp('2019-02-04 00:00:00'), 'V'], [Timestamp('2019-02-05 00:00:00'), 'C'], [Timestamp('2019-02-06 00:00:00'), 'C'], [Timestamp('2019-02-07 00:00:00'), 'C'], [Timestamp('2019-02-08 00:00:00'), 'C'], [Timestamp('2019-02-11 00:00:00'), 'C'], [Timestamp('2019-02-12 00:00:00'), 'V'], [Timestamp('2019-02-13 00:00:00'), 'V'], [Timestamp('2019-02-14 00:00:00'), 'V'], [Timestamp('2019-02-15 00:00:00'), 'C'], [Timestamp('2019-02-18 
00:00:00'), 'C'], [Timestamp('2019-02-19 00:00:00'), 'C'], [Timestamp('2019-02-20 00:00:00'), 'V'], [Timestamp('2019-02-21 00:00:00'), '_'], [Timestamp('2019-02-22 00:00:00'), 'C'], [Timestamp('2019-02-25 00:00:00'), 'V'], [Timestamp('2019-02-26 00:00:00'), 'C'], [Timestamp('2019-02-27 00:00:00'), 'V'], [Timestamp('2019-02-28 00:00:00'), '_'], [Timestamp('2019-03-01 00:00:00'), '_'], [Timestamp('2019-03-07 00:00:00'), 'C'], [Timestamp('2019-03-08 00:00:00'), 'V'], [Timestamp('2019-03-11 00:00:00'), 'V'], [Timestamp('2019-03-12 00:00:00'), 'C'], [Timestamp('2019-03-13 00:00:00'), 'V'], [Timestamp('2019-03-14 00:00:00'), 'V'], [Timestamp('2019-03-15 00:00:00'), '_'], [Timestamp('2019-03-18 00:00:00'), 'V'], [Timestamp('2019-03-19 00:00:00'), 'V'], [Timestamp('2019-03-20 00:00:00'), 'C'], [Timestamp('2019-03-21 00:00:00'), 'C'], [Timestamp('2019-03-22 00:00:00'), 'C'], [Timestamp('2019-03-25 00:00:00'), 'C'], [Timestamp('2019-03-26 00:00:00'), 'V'], [Timestamp('2019-03-27 00:00:00'), 'C'], [Timestamp('2019-03-28 00:00:00'), 'V'], [Timestamp('2019-03-29 00:00:00'), 'C'], [Timestamp('2019-04-01 00:00:00'), 'C'], [Timestamp('2019-04-02 00:00:00'), 'V'], [Timestamp('2019-04-03 00:00:00'), 'C'], [Timestamp('2019-04-04 00:00:00'), '_'], [Timestamp('2019-04-05 00:00:00'), 'C'], [Timestamp('2019-04-08 00:00:00'), 'V'], [Timestamp('2019-04-09 00:00:00'), 'C'], [Timestamp('2019-04-10 00:00:00'), 'C'], [Timestamp('2019-04-11 00:00:00'), 'C'], [Timestamp('2019-04-12 00:00:00'), 'C'], [Timestamp('2019-04-15 00:00:00'), 'V'], [Timestamp('2019-04-16 00:00:00'), 'V'], [Timestamp('2019-04-17 00:00:00'), 'V'], [Timestamp('2019-04-18 00:00:00'), 'V'], [Timestamp('2019-04-22 00:00:00'), 'C'], [Timestamp('2019-04-23 00:00:00'), 'V'], [Timestamp('2019-04-24 00:00:00'), 'C'], [Timestamp('2019-04-25 00:00:00'), 'V'], [Timestamp('2019-04-26 00:00:00'), 'C'], [Timestamp('2019-04-29 00:00:00'), 'V'], [Timestamp('2019-04-30 00:00:00'), 'C'], [Timestamp('2019-05-02 00:00:00'), 'C'], [Timestamp('2019-05-03 00:00:00'), 'V'], [Timestamp('2019-05-06 00:00:00'), 'C'], [Timestamp('2019-05-07 00:00:00'), 'V'], [Timestamp('2019-05-08 00:00:00'), 'C'], [Timestamp('2019-05-09 00:00:00'), 'C'], [Timestamp('2019-05-10 00:00:00'), 'C'], [Timestamp('2019-05-13 00:00:00'), '_'], [Timestamp('2019-05-14 00:00:00'), 'C'], [Timestamp('2019-05-15 00:00:00'), 'C'], [Timestamp('2019-05-16 00:00:00'), 'C'], [Timestamp('2019-05-17 00:00:00'), 'C'], [Timestamp('2019-05-20 00:00:00'), 'V'], [Timestamp('2019-05-21 00:00:00'), 'V'], [Timestamp('2019-05-22 00:00:00'), 'V'], [Timestamp('2019-05-23 00:00:00'), 'V'], [Timestamp('2019-05-24 00:00:00'), 'V'], [Timestamp('2019-05-27 00:00:00'), 'V'], [Timestamp('2019-05-28 00:00:00'), 'V'], [Timestamp('2019-05-29 00:00:00'), '_'], [Timestamp('2019-05-30 00:00:00'), 'C'], [Timestamp('2019-05-31 00:00:00'), 'C'], [Timestamp('2019-06-03 00:00:00'), 'V'], [Timestamp('2019-06-04 00:00:00'), 'V'], [Timestamp('2019-06-05 00:00:00'), 'C'], [Timestamp('2019-06-06 00:00:00'), 'V'], [Timestamp('2019-06-07 00:00:00'), 'V'], [Timestamp('2019-06-10 00:00:00'), 'C'], [Timestamp('2019-06-11 00:00:00'), 'V'], [Timestamp('2019-06-12 00:00:00'), 'C'], [Timestamp('2019-06-13 00:00:00'), 'V'], [Timestamp('2019-06-14 00:00:00'), 'C'], [Timestamp('2019-06-17 00:00:00'), '_'], [Timestamp('2019-06-18 00:00:00'), 'V'], [Timestamp('2019-06-19 00:00:00'), 'V'], [Timestamp('2019-06-21 00:00:00'), 'V'], [Timestamp('2019-06-24 00:00:00'), 'C'], [Timestamp('2019-06-25 00:00:00'), 'C'], [Timestamp('2019-06-26 00:00:00'), '_'], 
[Timestamp('2019-06-27 00:00:00'), 'C'], [Timestamp('2019-06-28 00:00:00'), 'V'], [Timestamp('2019-07-01 00:00:00'), 'C'], [Timestamp('2019-07-02 00:00:00'), '_'], [Timestamp('2019-07-03 00:00:00'), 'C'], [Timestamp('2019-07-04 00:00:00'), 'V'], [Timestamp('2019-07-05 00:00:00'), 'V'], [Timestamp('2019-07-08 00:00:00'), 'V'], [Timestamp('2019-07-10 00:00:00'), 'V'], [Timestamp('2019-07-11 00:00:00'), 'V'], [Timestamp('2019-07-12 00:00:00'), '_'], [Timestamp('2019-07-15 00:00:00'), 'C'], [Timestamp('2019-07-16 00:00:00'), 'C'], [Timestamp('2019-07-17 00:00:00'), 'C'], [Timestamp('2019-07-18 00:00:00'), 'C'], [Timestamp('2019-07-19 00:00:00'), 'C'], [Timestamp('2019-07-22 00:00:00'), 'C'], [Timestamp('2019-07-23 00:00:00'), 'C'], [Timestamp('2019-07-24 00:00:00'), 'C'], [Timestamp('2019-07-25 00:00:00'), 'C'], [Timestamp('2019-07-26 00:00:00'), 'C'], [Timestamp('2019-07-29 00:00:00'), 'V'], [Timestamp('2019-07-30 00:00:00'), 'C'], [Timestamp('2019-07-31 00:00:00'), '_'], [Timestamp('2019-08-01 00:00:00'), 'C'], [Timestamp('2019-08-02 00:00:00'), 'V'], [Timestamp('2019-08-05 00:00:00'), 'C'], [Timestamp('2019-08-06 00:00:00'), 'V'], [Timestamp('2019-08-07 00:00:00'), 'C'], [Timestamp('2019-08-08 00:00:00'), 'C'], [Timestamp('2019-08-09 00:00:00'), 'V'], [Timestamp('2019-08-12 00:00:00'), 'C'], [Timestamp('2019-08-13 00:00:00'), 'V'], [Timestamp('2019-08-14 00:00:00'), 'C'], [Timestamp('2019-08-15 00:00:00'), 'C'], [Timestamp('2019-08-16 00:00:00'), 'V'], [Timestamp('2019-08-19 00:00:00'), 'C'], [Timestamp('2019-08-20 00:00:00'), '_'], [Timestamp('2019-08-21 00:00:00'), 'V'], [Timestamp('2019-08-22 00:00:00'), 'C'], [Timestamp('2019-08-23 00:00:00'), 'C'], [Timestamp('2019-08-26 00:00:00'), 'C'], [Timestamp('2019-08-27 00:00:00'), 'V'], [Timestamp('2019-08-28 00:00:00'), '_'], [Timestamp('2019-08-29 00:00:00'), '_'], [Timestamp('2019-08-30 00:00:00'), 'C'], [Timestamp('2019-09-02 00:00:00'), 'C'], [Timestamp('2019-09-03 00:00:00'), 'V'], [Timestamp('2019-09-04 00:00:00'), 'V'], [Timestamp('2019-09-05 00:00:00'), 'V'], [Timestamp('2019-09-06 00:00:00'), 'C'], [Timestamp('2019-09-09 00:00:00'), 'C'], [Timestamp('2019-09-10 00:00:00'), 'V'], [Timestamp('2019-09-11 00:00:00'), 'C'], [Timestamp('2019-09-12 00:00:00'), '_'], [Timestamp('2019-09-13 00:00:00'), 'C'], [Timestamp('2019-09-16 00:00:00'), 'V'], [Timestamp('2019-09-17 00:00:00'), 'C'], [Timestamp('2019-09-18 00:00:00'), 'C'], [Timestamp('2019-09-19 00:00:00'), 'C'], [Timestamp('2019-09-20 00:00:00'), 'C'], [Timestamp('2019-09-23 00:00:00'), 'V'], [Timestamp('2019-09-24 00:00:00'), 'C'], [Timestamp('2019-09-25 00:00:00'), 'C'], [Timestamp('2019-09-26 00:00:00'), 'V'], [Timestamp('2019-09-27 00:00:00'), 'C'], [Timestamp('2019-09-30 00:00:00'), 'C'], [Timestamp('2019-10-01 00:00:00'), 'C'], [Timestamp('2019-10-02 00:00:00'), 'C'], [Timestamp('2019-10-03 00:00:00'), 'V'], [Timestamp('2019-10-04 00:00:00'), 'V'], [Timestamp('2019-10-07 00:00:00'), 'V'], [Timestamp('2019-10-08 00:00:00'), 'V'], [Timestamp('2019-10-09 00:00:00'), 'V'], [Timestamp('2019-10-10 00:00:00'), 'V'], [Timestamp('2019-10-11 00:00:00'), 'V'], [Timestamp('2019-10-14 00:00:00'), 'V'], [Timestamp('2019-10-15 00:00:00'), 'V'], [Timestamp('2019-10-16 00:00:00'), 'V'], [Timestamp('2019-10-17 00:00:00'), 'V'], [Timestamp('2019-10-18 00:00:00'), 'V'], [Timestamp('2019-10-21 00:00:00'), 'V'], [Timestamp('2019-10-22 00:00:00'), 'V'], [Timestamp('2019-10-23 00:00:00'), 'V'], [Timestamp('2019-10-24 00:00:00'), 'V'], [Timestamp('2019-10-25 00:00:00'), 'V'], [Timestamp('2019-10-28 
00:00:00'), 'V'], [Timestamp('2019-10-29 00:00:00'), 'C'], [Timestamp('2019-10-30 00:00:00'), 'V'], [Timestamp('2019-10-31 00:00:00'), 'V'], [Timestamp('2019-11-01 00:00:00'), 'C'], [Timestamp('2019-11-04 00:00:00'), 'C'], [Timestamp('2019-11-05 00:00:00'), 'C'], [Timestamp('2019-11-06 00:00:00'), 'C'], [Timestamp('2019-11-07 00:00:00'), 'C'], [Timestamp('2019-11-08 00:00:00'), '_'], [Timestamp('2019-11-11 00:00:00'), '_'], [Timestamp('2019-11-12 00:00:00'), '_'], [Timestamp('2019-11-13 00:00:00'), 'C'], [Timestamp('2019-11-14 00:00:00'), 'C'], [Timestamp('2019-11-18 00:00:00'), 'C'], [Timestamp('2019-11-19 00:00:00'), 'C'], [Timestamp('2019-11-21 00:00:00'), 'V'], [Timestamp('2019-11-22 00:00:00'), 'V'], [Timestamp('2019-11-25 00:00:00'), 'V'], [Timestamp('2019-11-26 00:00:00'), 'V'], [Timestamp('2019-11-27 00:00:00'), 'V'], [Timestamp('2019-11-28 00:00:00'), 'V'], [Timestamp('2019-11-29 00:00:00'), 'C'], [Timestamp('2019-12-02 00:00:00'), 'C'], [Timestamp('2019-12-03 00:00:00'), 'C'], [Timestamp('2019-12-04 00:00:00'), 'V'], [Timestamp('2019-12-05 00:00:00'), 'V'], [Timestamp('2019-12-06 00:00:00'), 'V'], [Timestamp('2019-12-09 00:00:00'), 'C'], [Timestamp('2019-12-10 00:00:00'), 'V'], [Timestamp('2019-12-11 00:00:00'), 'C'], [Timestamp('2019-12-12 00:00:00'), 'V'], [Timestamp('2019-12-13 00:00:00'), 'C'], [Timestamp('2019-12-16 00:00:00'), 'C'], [Timestamp('2019-12-17 00:00:00'), 'C'], [Timestamp('2019-12-18 00:00:00'), 'V'], [Timestamp('2019-12-19 00:00:00'), 'V'], [Timestamp('2019-12-20 00:00:00'), 'C'], [Timestamp('2019-12-23 00:00:00'), '_'], [Timestamp('2019-12-26 00:00:00'), '_'], [Timestamp('2019-12-27 00:00:00'), '_'], [Timestamp('2019-12-30 00:00:00'), 'C'], [Timestamp('2020-01-02 00:00:00'), 'C'], [Timestamp('2020-01-03 00:00:00'), 'C'], [Timestamp('2020-01-06 00:00:00'), 'V'], [Timestamp('2020-01-07 00:00:00'), 'C'], [Timestamp('2020-01-08 00:00:00'), 'C'], [Timestamp('2020-01-09 00:00:00'), 'C'], [Timestamp('2020-01-10 00:00:00'), 'V'], [Timestamp('2020-01-13 00:00:00'), 'C'], [Timestamp('2020-01-14 00:00:00'), 'C'], [Timestamp('2020-01-15 00:00:00'), 'C'], [Timestamp('2020-01-16 00:00:00'), 'C'], [Timestamp('2020-01-17 00:00:00'), 'V'], [Timestamp('2020-01-20 00:00:00'), 'V'], [Timestamp('2020-01-21 00:00:00'), 'C'], [Timestamp('2020-01-22 00:00:00'), 'V'], [Timestamp('2020-01-23 00:00:00'), 'V'], [Timestamp('2020-01-24 00:00:00'), 'C'], [Timestamp('2020-01-27 00:00:00'), 'C'], [Timestamp('2020-01-28 00:00:00'), 'V'], [Timestamp('2020-01-29 00:00:00'), 'V'], [Timestamp('2020-01-30 00:00:00'), 'V'], [Timestamp('2020-01-31 00:00:00'), 'V'], [Timestamp('2020-02-03 00:00:00'), 'C'], [Timestamp('2020-02-04 00:00:00'), 'V'], [Timestamp('2020-02-05 00:00:00'), 'C'], [Timestamp('2020-02-06 00:00:00'), 'V'], [Timestamp('2020-02-07 00:00:00'), '_'], [Timestamp('2020-02-10 00:00:00'), 'V'], [Timestamp('2020-02-11 00:00:00'), 'V'], [Timestamp('2020-02-12 00:00:00'), 'V'], [Timestamp('2020-02-13 00:00:00'), 'C'], [Timestamp('2020-02-14 00:00:00'), 'C'], [Timestamp('2020-02-17 00:00:00'), 'C'], [Timestamp('2020-02-18 00:00:00'), 'V'], [Timestamp('2020-02-19 00:00:00'), 'V'], [Timestamp('2020-02-20 00:00:00'), '_'], [Timestamp('2020-02-21 00:00:00'), 'C'], [Timestamp('2020-02-27 00:00:00'), 'C'], [Timestamp('2020-02-28 00:00:00'), 'C'], [Timestamp('2020-03-02 00:00:00'), 'V'], [Timestamp('2020-03-03 00:00:00'), 'V'], [Timestamp('2020-03-04 00:00:00'), 'C'], [Timestamp('2020-03-05 00:00:00'), 'C'], [Timestamp('2020-03-06 00:00:00'), 'C'], [Timestamp('2020-03-09 00:00:00'), 'V'], 
[Timestamp('2020-03-10 00:00:00'), 'C'], [Timestamp('2020-03-11 00:00:00'), 'V'], [Timestamp('2020-03-12 00:00:00'), 'C'], [Timestamp('2020-03-13 00:00:00'), 'V'], [Timestamp('2020-03-16 00:00:00'), 'C'], [Timestamp('2020-03-17 00:00:00'), 'V'], [Timestamp('2020-03-18 00:00:00'), 'C'], [Timestamp('2020-03-19 00:00:00'), 'V'], [Timestamp('2020-03-20 00:00:00'), '_'], [Timestamp('2020-03-23 00:00:00'), 'V'], [Timestamp('2020-03-24 00:00:00'), 'V'], [Timestamp('2020-03-25 00:00:00'), 'C'], [Timestamp('2020-03-26 00:00:00'), 'V'], [Timestamp('2020-03-27 00:00:00'), 'V'], [Timestamp('2020-03-30 00:00:00'), 'V'], [Timestamp('2020-03-31 00:00:00'), 'C'], [Timestamp('2020-04-01 00:00:00'), 'C'], [Timestamp('2020-04-02 00:00:00'), 'V'], [Timestamp('2020-04-03 00:00:00'), 'C'], [Timestamp('2020-04-06 00:00:00'), 'C'], [Timestamp('2020-04-07 00:00:00'), 'C'], [Timestamp('2020-04-08 00:00:00'), 'V'], [Timestamp('2020-04-09 00:00:00'), 'C'], [Timestamp('2020-04-13 00:00:00'), 'C'], [Timestamp('2020-04-14 00:00:00'), 'C'], [Timestamp('2020-04-15 00:00:00'), 'C'], [Timestamp('2020-04-16 00:00:00'), 'V'], [Timestamp('2020-04-17 00:00:00'), 'C'], [Timestamp('2020-04-20 00:00:00'), 'C'], [Timestamp('2020-04-22 00:00:00'), 'C'], [Timestamp('2020-04-23 00:00:00'), 'V'], [Timestamp('2020-04-24 00:00:00'), 'C'], [Timestamp('2020-04-27 00:00:00'), 'V'], [Timestamp('2020-04-28 00:00:00'), 'C'], [Timestamp('2020-04-29 00:00:00'), 'V'], [Timestamp('2020-04-30 00:00:00'), 'V'], [Timestamp('2020-05-04 00:00:00'), 'C'], [Timestamp('2020-05-05 00:00:00'), 'C'], [Timestamp('2020-05-06 00:00:00'), 'C'], [Timestamp('2020-05-07 00:00:00'), 'V'], [Timestamp('2020-05-08 00:00:00'), 'V'], [Timestamp('2020-05-11 00:00:00'), 'C'], [Timestamp('2020-05-12 00:00:00'), 'C'], [Timestamp('2020-05-13 00:00:00'), 'C'], [Timestamp('2020-05-14 00:00:00'), 'V'], [Timestamp('2020-05-15 00:00:00'), 'C'], [Timestamp('2020-05-18 00:00:00'), 'C'], [Timestamp('2020-05-19 00:00:00'), 'C'], [Timestamp('2020-05-20 00:00:00'), 'V'], [Timestamp('2020-05-21 00:00:00'), 'V'], [Timestamp('2020-05-22 00:00:00'), 'C'], [Timestamp('2020-05-25 00:00:00'), 'V'], [Timestamp('2020-05-26 00:00:00'), 'C'], [Timestamp('2020-05-27 00:00:00'), 'V'], [Timestamp('2020-05-28 00:00:00'), '_'], [Timestamp('2020-05-29 00:00:00'), '_'], [Timestamp('2020-06-01 00:00:00'), 'C'], [Timestamp('2020-06-02 00:00:00'), 'V'], [Timestamp('2020-06-03 00:00:00'), 'V'], [Timestamp('2020-06-04 00:00:00'), 'C'], [Timestamp('2020-06-05 00:00:00'), 'C'], [Timestamp('2020-06-08 00:00:00'), 'C'], [Timestamp('2020-06-09 00:00:00'), 'V'], [Timestamp('2020-06-10 00:00:00'), 'C'], [Timestamp('2020-06-12 00:00:00'), 'C'], [Timestamp('2020-06-15 00:00:00'), 'C'], [Timestamp('2020-06-16 00:00:00'), 'V'], [Timestamp('2020-06-17 00:00:00'), 'V'], [Timestamp('2020-06-18 00:00:00'), 'C'], [Timestamp('2020-06-19 00:00:00'), 'C'], [Timestamp('2020-06-22 00:00:00'), 'C'], [Timestamp('2020-06-23 00:00:00'), 'V'], [Timestamp('2020-06-24 00:00:00'), 'C'], [Timestamp('2020-06-25 00:00:00'), '_'], [Timestamp('2020-06-26 00:00:00'), 'C'], [Timestamp('2020-06-29 00:00:00'), 'V'], [Timestamp('2020-06-30 00:00:00'), 'V'], [Timestamp('2020-07-01 00:00:00'), 'V'], [Timestamp('2020-07-02 00:00:00'), '_'], [Timestamp('2020-07-03 00:00:00'), 'C'], [Timestamp('2020-07-06 00:00:00'), 'V'], [Timestamp('2020-07-07 00:00:00'), 'C'], [Timestamp('2020-07-08 00:00:00'), 'V'], [Timestamp('2020-07-09 00:00:00'), 'C'], [Timestamp('2020-07-10 00:00:00'), 'V'], [Timestamp('2020-07-13 00:00:00'), 'V'], [Timestamp('2020-07-14 
00:00:00'), 'C'], [Timestamp('2020-07-15 00:00:00'), 'V'], [Timestamp('2020-07-16 00:00:00'), 'C'], [Timestamp('2020-07-17 00:00:00'), 'V'], [Timestamp('2020-07-20 00:00:00'), 'C'], [Timestamp('2020-07-21 00:00:00'), 'C'], [Timestamp('2020-07-22 00:00:00'), 'C'], [Timestamp('2020-07-23 00:00:00'), 'C'], [Timestamp('2020-07-24 00:00:00'), 'V'], [Timestamp('2020-07-27 00:00:00'), 'V'], [Timestamp('2020-07-28 00:00:00'), 'C'], [Timestamp('2020-07-29 00:00:00'), 'C'], [Timestamp('2020-07-30 00:00:00'), 'C'], [Timestamp('2020-07-31 00:00:00'), 'C'], [Timestamp('2020-08-03 00:00:00'), 'C'], [Timestamp('2020-08-04 00:00:00'), 'C'], [Timestamp('2020-08-05 00:00:00'), 'V'], [Timestamp('2020-08-06 00:00:00'), 'V'], [Timestamp('2020-08-07 00:00:00'), 'C'], [Timestamp('2020-08-10 00:00:00'), 'V'], [Timestamp('2020-08-11 00:00:00'), 'C'], [Timestamp('2020-08-12 00:00:00'), 'V'], [Timestamp('2020-08-13 00:00:00'), 'C'], [Timestamp('2020-08-14 00:00:00'), 'C'], [Timestamp('2020-08-17 00:00:00'), 'C'], [Timestamp('2020-08-18 00:00:00'), 'V'], [Timestamp('2020-08-19 00:00:00'), 'V'], [Timestamp('2020-08-20 00:00:00'), 'C'], [Timestamp('2020-08-21 00:00:00'), 'C'], [Timestamp('2020-08-24 00:00:00'), 'C'], [Timestamp('2020-08-25 00:00:00'), 'V'], [Timestamp('2020-08-26 00:00:00'), 'V'], [Timestamp('2020-08-27 00:00:00'), 'C'], [Timestamp('2020-08-28 00:00:00'), 'V'], [Timestamp('2020-08-31 00:00:00'), 'C'], [Timestamp('2020-09-01 00:00:00'), 'V'], [Timestamp('2020-09-02 00:00:00'), 'C'], [Timestamp('2020-09-03 00:00:00'), 'V'], [Timestamp('2020-09-04 00:00:00'), 'C'], [Timestamp('2020-09-08 00:00:00'), 'C'], [Timestamp('2020-09-09 00:00:00'), 'V'], [Timestamp('2020-09-10 00:00:00'), 'C'], [Timestamp('2020-09-11 00:00:00'), 'C'], [Timestamp('2020-09-14 00:00:00'), 'C'], [Timestamp('2020-09-15 00:00:00'), 'V'], [Timestamp('2020-09-16 00:00:00'), 'V'], [Timestamp('2020-09-17 00:00:00'), 'V'], [Timestamp('2020-09-18 00:00:00'), 'C'], [Timestamp('2020-09-21 00:00:00'), 'C'], [Timestamp('2020-09-22 00:00:00'), 'C'], [Timestamp('2020-09-23 00:00:00'), 'C'], [Timestamp('2020-09-24 00:00:00'), 'V'], [Timestamp('2020-09-25 00:00:00'), 'C'], [Timestamp('2020-09-28 00:00:00'), '_'], [Timestamp('2020-09-29 00:00:00'), 'C'], [Timestamp('2020-09-30 00:00:00'), 'V'], [Timestamp('2020-10-01 00:00:00'), 'V'], [Timestamp('2020-10-02 00:00:00'), 'V'], [Timestamp('2020-10-05 00:00:00'), 'V'], [Timestamp('2020-10-06 00:00:00'), 'C'], [Timestamp('2020-10-07 00:00:00'), 'C'], [Timestamp('2020-10-08 00:00:00'), 'V'], [Timestamp('2020-10-09 00:00:00'), '_'], [Timestamp('2020-10-13 00:00:00'), 'V'], [Timestamp('2020-10-14 00:00:00'), 'C'], [Timestamp('2020-10-15 00:00:00'), 'C'], [Timestamp('2020-10-16 00:00:00'), 'C'], [Timestamp('2020-10-19 00:00:00'), 'V'], [Timestamp('2020-10-20 00:00:00'), 'C'], [Timestamp('2020-10-21 00:00:00'), 'V'], [Timestamp('2020-10-22 00:00:00'), 'C'], [Timestamp('2020-10-23 00:00:00'), 'V'], [Timestamp('2020-10-26 00:00:00'), 'C'], [Timestamp('2020-10-27 00:00:00'), 'C'], [Timestamp('2020-10-28 00:00:00'), 'C'], [Timestamp('2020-10-29 00:00:00'), 'C'], [Timestamp('2020-10-30 00:00:00'), 'V'], [Timestamp('2020-11-03 00:00:00'), 'V'], [Timestamp('2020-11-04 00:00:00'), '_'], [Timestamp('2020-11-05 00:00:00'), 'V'], [Timestamp('2020-11-06 00:00:00'), 'C'], [Timestamp('2020-11-09 00:00:00'), 'V'], [Timestamp('2020-11-10 00:00:00'), 'C'], [Timestamp('2020-11-11 00:00:00'), 'C'], [Timestamp('2020-11-12 00:00:00'), 'C'], [Timestamp('2020-11-13 00:00:00'), 'V'], [Timestamp('2020-11-16 00:00:00'), 'V'], 
[Timestamp('2020-11-17 00:00:00'), '_'], [Timestamp('2020-11-18 00:00:00'), 'C'], [Timestamp('2020-11-19 00:00:00'), 'C'], [Timestamp('2020-11-23 00:00:00'), 'V'], [Timestamp('2020-11-24 00:00:00'), 'V'], [Timestamp('2020-11-25 00:00:00'), 'V'], [Timestamp('2020-11-26 00:00:00'), 'C'], [Timestamp('2020-11-27 00:00:00'), 'C'], [Timestamp('2020-11-30 00:00:00'), 'C'], [Timestamp('2020-12-01 00:00:00'), 'C'], [Timestamp('2020-12-02 00:00:00'), 'V'], [Timestamp('2020-12-03 00:00:00'), '_'], [Timestamp('2020-12-04 00:00:00'), 'V'], [Timestamp('2020-12-07 00:00:00'), 'C'], [Timestamp('2020-12-08 00:00:00'), 'C'], [Timestamp('2020-12-09 00:00:00'), 'C'], [Timestamp('2020-12-10 00:00:00'), 'V'], [Timestamp('2020-12-11 00:00:00'), 'C'], [Timestamp('2020-12-14 00:00:00'), '_'], [Timestamp('2020-12-15 00:00:00'), 'V'], [Timestamp('2020-12-16 00:00:00'), 'V'], [Timestamp('2020-12-17 00:00:00'), 'C'], [Timestamp('2020-12-18 00:00:00'), 'C'], [Timestamp('2020-12-21 00:00:00'), 'C'], [Timestamp('2020-12-22 00:00:00'), 'V'], [Timestamp('2020-12-23 00:00:00'), 'C'], [Timestamp('2020-12-28 00:00:00'), 'V'], [Timestamp('2020-12-29 00:00:00'), 'V'], [Timestamp('2020-12-30 00:00:00'), 'V'], [Timestamp('2021-01-04 00:00:00'), 'V'], [Timestamp('2021-01-05 00:00:00'), 'V'], [Timestamp('2021-01-06 00:00:00'), 'C'], [Timestamp('2021-01-07 00:00:00'), '_'], [Timestamp('2021-01-08 00:00:00'), 'C'], [Timestamp('2021-01-11 00:00:00'), '_'], [Timestamp('2021-01-12 00:00:00'), 'C'], [Timestamp('2021-01-13 00:00:00'), 'C'], [Timestamp('2021-01-14 00:00:00'), 'C'], [Timestamp('2021-01-15 00:00:00'), 'C'], [Timestamp('2021-01-18 00:00:00'), 'V'], [Timestamp('2021-01-19 00:00:00'), '_'], [Timestamp('2021-01-20 00:00:00'), 'C'], [Timestamp('2021-01-21 00:00:00'), 'V'], [Timestamp('2021-01-22 00:00:00'), 'C'], [Timestamp('2021-01-26 00:00:00'), 'C'], [Timestamp('2021-01-27 00:00:00'), 'C'], [Timestamp('2021-01-28 00:00:00'), 'V'], [Timestamp('2021-01-29 00:00:00'), 'C'], [Timestamp('2021-02-01 00:00:00'), 'V'], [Timestamp('2021-02-02 00:00:00'), 'C'], [Timestamp('2021-02-03 00:00:00'), 'V'], [Timestamp('2021-02-04 00:00:00'), 'V'], [Timestamp('2021-02-05 00:00:00'), 'C'], [Timestamp('2021-02-08 00:00:00'), 'C'], [Timestamp('2021-02-09 00:00:00'), 'C'], [Timestamp('2021-02-10 00:00:00'), 'V'], [Timestamp('2021-02-11 00:00:00'), 'V'], [Timestamp('2021-02-12 00:00:00'), 'V'], [Timestamp('2021-02-18 00:00:00'), 'V'], [Timestamp('2021-02-19 00:00:00'), 'C'], [Timestamp('2021-02-22 00:00:00'), 'C'], [Timestamp('2021-02-23 00:00:00'), 'C'], [Timestamp('2021-02-24 00:00:00'), 'V'], [Timestamp('2021-02-25 00:00:00'), 'C'], [Timestamp('2021-02-26 00:00:00'), '_'], [Timestamp('2021-03-01 00:00:00'), 'C'], [Timestamp('2021-03-02 00:00:00'), 'C'], [Timestamp('2021-03-03 00:00:00'), 'C'], [Timestamp('2021-03-04 00:00:00'), 'V'], [Timestamp('2021-03-05 00:00:00'), 'V'], [Timestamp('2021-03-08 00:00:00'), '_'], [Timestamp('2021-03-09 00:00:00'), 'V'], [Timestamp('2021-03-10 00:00:00'), 'V'], [Timestamp('2021-03-11 00:00:00'), 'V'], [Timestamp('2021-03-12 00:00:00'), 'V'], [Timestamp('2021-03-15 00:00:00'), 'C'], [Timestamp('2021-03-16 00:00:00'), 'C'], [Timestamp('2021-03-17 00:00:00'), 'V'], [Timestamp('2021-03-18 00:00:00'), 'C'], [Timestamp('2021-03-19 00:00:00'), 'V'], [Timestamp('2021-03-22 00:00:00'), 'V'], [Timestamp('2021-03-23 00:00:00'), 'C'], [Timestamp('2021-03-24 00:00:00'), 'C'], [Timestamp('2021-03-25 00:00:00'), 'C'], [Timestamp('2021-03-26 00:00:00'), 'V'], [Timestamp('2021-03-29 00:00:00'), 'V'], [Timestamp('2021-03-30 
00:00:00'), 'V'], [Timestamp('2021-03-31 00:00:00'), 'V'], [Timestamp('2021-04-01 00:00:00'), 'C'], [Timestamp('2021-04-05 00:00:00'), 'C'], [Timestamp('2021-04-06 00:00:00'), 'C'], [Timestamp('2021-04-07 00:00:00'), '_'], [Timestamp('2021-04-08 00:00:00'), '_'], [Timestamp('2021-04-09 00:00:00'), '_'], [Timestamp('2021-04-12 00:00:00'), 'V'], [Timestamp('2021-04-13 00:00:00'), 'C'], [Timestamp('2021-04-14 00:00:00'), 'V'], [Timestamp('2021-04-15 00:00:00'), 'C'], [Timestamp('2021-04-16 00:00:00'), '_'], [Timestamp('2021-04-19 00:00:00'), 'V'], [Timestamp('2021-04-20 00:00:00'), 'V'], [Timestamp('2021-04-22 00:00:00'), 'C'], [Timestamp('2021-04-23 00:00:00'), 'C'], [Timestamp('2021-04-26 00:00:00'), 'V'], [Timestamp('2021-04-27 00:00:00'), 'C'], [Timestamp('2021-04-28 00:00:00'), 'V'], [Timestamp('2021-04-29 00:00:00'), 'C'], [Timestamp('2021-04-30 00:00:00'), '_'], [Timestamp('2021-05-03 00:00:00'), 'V'], [Timestamp('2021-05-04 00:00:00'), '_'], [Timestamp('2021-05-05 00:00:00'), 'V'], [Timestamp('2021-05-06 00:00:00'), 'C'], [Timestamp('2021-05-07 00:00:00'), 'V'], [Timestamp('2021-05-10 00:00:00'), '_'], [Timestamp('2021-05-11 00:00:00'), 'V'], [Timestamp('2021-05-12 00:00:00'), 'V'], [Timestamp('2021-05-13 00:00:00'), 'V'], [Timestamp('2021-05-14 00:00:00'), '_'], [Timestamp('2021-05-17 00:00:00'), '_'], [Timestamp('2021-05-18 00:00:00'), '_'], [Timestamp('2021-05-19 00:00:00'), 'C'], [Timestamp('2021-05-20 00:00:00'), 'V'], [Timestamp('2021-05-21 00:00:00'), 'C'], [Timestamp('2021-05-24 00:00:00'), 'V'], [Timestamp('2021-05-25 00:00:00'), 'C']]\n"
],
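[
"# Added sketch (not part of the original run): the max-accuracy signal selection above,\n# restated as a reusable function. Assumes the res* lists from earlier cells are in scope;\n# `horizons` and `select_signal` are hypothetical names introduced here.\ndef select_signal(rows_at_day):\n    # rows_at_day: one row per horizon, each [date, ..., p_sell, p_hold, p_buy, accuracy].\n    best = max(rows_at_day, key=lambda r: r[6])  # horizon with the highest accuracy (first wins ties)\n    p_sell, p_hold, p_buy = best[3], best[4], best[5]\n    if p_sell > p_hold + p_buy:\n        return \"V\"  # sell\n    if p_sell + p_hold < p_buy:\n        return \"C\"  # buy\n    return \"_\"      # hold\n\nhorizons = [res1, res2, res3, res5, res10]\n# Should reproduce the list printed above, signal for signal.\nassert [select_signal([h[i] for h in horizons]) for i in range(len(res1))] == [t[1] for t in trade_instruction]",
"_____no_output_____"
],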
[
"fig, ax = plt.subplots(figsize=(25, 7.5))\nsn.lineplot(data=df_petr4[\"Close\"].iloc[dia_ini_test_idx:dia_end_test_idx],ax=ax)\nstyle = dict(size=8, color='gray')\nfor i in range(len(trade_instruction)):\n ax.text(trade_instruction[i][0], df_petr4[\"Close\"].iloc[dia_ini_test_idx:dia_end_test_idx].iloc[i]+.1, trade_instruction[i][1],**style)",
"_____no_output_____"
],
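[
"# Added sketch: distribution of the signals over the test window (C = buy, V = sell, _ = hold).\nfrom collections import Counter\nprint(Counter(instr for _, instr in trade_instruction))",
"_____no_output_____"
],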
[
"stock = 100\ncash = 0\nfor i in range(len(trade_instruction)):\n Total_Value_Init = 100*df_petr4_1[\"Close\"].loc[trade_instruction[0][0]]\n if cash <=0 and stock <=0:\n print(i,stock,cash)\n break\n elif trade_instruction[i][1] == \"C\":\n if cash > 0:\n stock += (cash/2)/df_petr4_1[\"Close\"].loc[trade_instruction[i][0]]\n cash = cash/2\n print(i,stock,cash,\"c\",df_petr4_1[\"Close\"].loc[trade_instruction[i][0]],trade_instruction[i][0])\n elif cash == 0 and stock > 0:\n \n pass\n elif cash <=0 and stock <=0:\n \n break\n print(i,stock,cash,\"c\",df_petr4_1[\"Close\"].loc[trade_instruction[i][0]],trade_instruction[i][0])\n elif trade_instruction[i][1] == \"M\":\n print(i,stock,cash,\"m\",df_petr4_1[\"Close\"].loc[trade_instruction[i][0]],trade_instruction[i][0])\n pass\n elif trade_instruction[i][1] == \"V\":\n if stock > 0:\n cash += (stock*.75)*df_petr4_1[\"Close\"].loc[trade_instruction[i][0]]\n stock = 0.25*stock\n print(i,stock,cash,\"v\",df_petr4_1[\"Close\"].loc[trade_instruction[i][0]],trade_instruction[i][0])\n elif cash > 0 and stock == 0:\n \n pass\n elif cash <=0 and stock <=0:\n \n break \n print(i,stock,cash,\"v\",df_petr4_1[\"Close\"].loc[trade_instruction[i][0]],trade_instruction[i][0])\n a = df_petr4_1[\"Close\"].loc[trade_instruction[i][0]]\n print(a)\n print(Total_Value_Init)\n b = i\n print(f\"dias negociados: {b}, qtd_ações: {stock}, dinheiro: R$ {cash}, Total Value: R$ {cash+stock*a}, Lucro: R$ {(cash+stock*a)-Total_Value_Init}\")\n\n",
"17.887107849121094\n1788.7107849121094\ndias negociados: 0, qtd_ações: 100, dinheiro: R$ 0, Total Value: R$ 1788.7107849121094, Lucro: R$ 0.0\n17.8153076171875\n1788.7107849121094\ndias negociados: 1, qtd_ações: 100, dinheiro: R$ 0, Total Value: R$ 1781.53076171875, Lucro: R$ -7.180023193359375\n17.49220848083496\n1788.7107849121094\ndias negociados: 2, qtd_ações: 100, dinheiro: R$ 0, Total Value: R$ 1749.220848083496, Lucro: R$ -39.48993682861328\n3 25.0 1326.0512351989746 v 17.680683135986328 2018-01-31 00:00:00\n3 25.0 1326.0512351989746 v 17.680683135986328 2018-01-31 00:00:00\n17.680683135986328\n1788.7107849121094\ndias negociados: 3, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1768.0683135986328, Lucro: R$ -20.642471313476562\n18.416627883911133\n1788.7107849121094\ndias negociados: 4, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1786.466932296753, Lucro: R$ -2.2438526153564453\n17.923006057739258\n1788.7107849121094\ndias negociados: 5, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1774.126386642456, Lucro: R$ -14.58439826965332\n17.088333129882812\n1788.7107849121094\ndias negociados: 6, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1753.259563446045, Lucro: R$ -35.45122146606445\n17.940956115722656\n1788.7107849121094\ndias negociados: 7, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1774.575138092041, Lucro: R$ -14.13564682006836\n17.44733238220215\n1788.7107849121094\ndias negociados: 8, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1762.2345447540283, Lucro: R$ -26.476240158081055\n17.097309112548828\n1788.7107849121094\ndias negociados: 9, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1753.4839630126953, Lucro: R$ -35.22682189941406\n16.846010208129883\n1788.7107849121094\ndias negociados: 10, qtd_ações: 25.0, dinheiro: R$ 1326.0512351989746, Total Value: R$ 1747.2014904022217, Lucro: R$ -41.509294509887695\n11 6.25 1652.5156617164612 v 17.411436080932617 2018-02-15 00:00:00\n11 6.25 1652.5156617164612 v 17.411436080932617 2018-02-15 00:00:00\n17.411436080932617\n1788.7107849121094\ndias negociados: 11, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1761.33713722229, Lucro: R$ -27.373647689819336\n17.429384231567383\n1788.7107849121094\ndias negociados: 12, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1761.4493131637573, Lucro: R$ -27.26147174835205\n17.99480438232422\n1788.7107849121094\ndias negociados: 13, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1764.9831891059875, Lucro: R$ -23.727595806121826\n18.344831466674805\n1788.7107849121094\ndias negociados: 14, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1767.1708583831787, Lucro: R$ -21.539926528930664\n18.174304962158203\n1788.7107849121094\ndias negociados: 15, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1766.10506772995, Lucro: R$ -22.605717182159424\n18.61408042907715\n1788.7107849121094\ndias negociados: 16, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1768.8536643981934, Lucro: R$ -19.857120513916016\n18.95512580871582\n1788.7107849121094\ndias negociados: 17, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1770.985198020935, Lucro: R$ -17.725586891174316\n19.314125061035156\n1788.7107849121094\ndias negociados: 18, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1773.228943347931, Lucro: R$ 
-15.481841564178467\n19.296173095703125\n1788.7107849121094\ndias negociados: 19, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1773.1167435646057, Lucro: R$ -15.594041347503662\n19.26027488708496\n1788.7107849121094\ndias negociados: 20, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1772.8923797607422, Lucro: R$ -15.818405151367188\n18.874351501464844\n1788.7107849121094\ndias negociados: 21, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1770.4803586006165, Lucro: R$ -18.23042631149292\n19.30514907836914\n1788.7107849121094\ndias negociados: 22, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1773.1728434562683, Lucro: R$ -15.537941455841064\n19.852624893188477\n1788.7107849121094\ndias negociados: 23, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1776.5945672988892, Lucro: R$ -12.116217613220215\n19.655174255371094\n1788.7107849121094\ndias negociados: 24, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1775.3605008125305, Lucro: R$ -13.350284099578857\n19.44875144958496\n1788.7107849121094\ndias negociados: 25, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1774.0703582763672, Lucro: R$ -14.640426635742188\n19.475671768188477\n1788.7107849121094\ndias negociados: 26, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1774.2386102676392, Lucro: R$ -14.472174644470215\n20.094945907592773\n1788.7107849121094\ndias negociados: 27, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1778.109073638916, Lucro: R$ -10.60171127319336\n20.04109764099121\n1788.7107849121094\ndias negociados: 28, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1777.7725219726562, Lucro: R$ -10.938262939453125\n19.843645095825195\n1788.7107849121094\ndias negociados: 29, qtd_ações: 6.25, dinheiro: R$ 1652.5156617164612, Total Value: R$ 1776.5384435653687, Lucro: R$ -12.172341346740723\n30 1.5625 1746.6686636209488 v 20.085973739624023 2018-03-14 00:00:00\n30 1.5625 1746.6686636209488 v 20.085973739624023 2018-03-14 00:00:00\n20.085973739624023\n1788.7107849121094\ndias negociados: 30, qtd_ações: 1.5625, dinheiro: R$ 1746.6686636209488, Total Value: R$ 1778.0529975891113, Lucro: R$ -10.657787322998047\n19.12565040588379\n1788.7107849121094\ndias negociados: 31, qtd_ações: 1.5625, dinheiro: R$ 1746.6686636209488, Total Value: R$ 1776.5524923801422, Lucro: R$ -12.158292531967163\n19.23335075378418\n1788.7107849121094\ndias negociados: 32, qtd_ações: 1.5625, dinheiro: R$ 1746.6686636209488, Total Value: R$ 1776.7207741737366, Lucro: R$ -11.990010738372803\n18.784603118896484\n1788.7107849121094\ndias negociados: 33, qtd_ações: 1.5625, dinheiro: R$ 1746.6686636209488, Total Value: R$ 1776.0196059942245, Lucro: R$ -12.691178917884827\n18.99102783203125\n1788.7107849121094\ndias negociados: 34, qtd_ações: 1.5625, dinheiro: R$ 1746.6686636209488, Total Value: R$ 1776.3421446084976, Lucro: R$ -12.368640303611755\n35 0.390625 1769.8598317801952 v 19.789796829223633 2018-03-21 00:00:00\n35 0.390625 1769.8598317801952 v 19.789796829223633 2018-03-21 00:00:00\n19.789796829223633\n1788.7107849121094\ndias negociados: 35, qtd_ações: 0.390625, dinheiro: R$ 1769.8598317801952, Total Value: R$ 1777.5902211666107, Lucro: R$ -11.120563745498657\n19.49362564086914\n1788.7107849121094\ndias negociados: 36, qtd_ações: 0.390625, dinheiro: R$ 1769.8598317801952, Total Value: R$ 1777.4745292961597, Lucro: R$ -11.23625561594963\n37 0.09765625 1775.5787434056401 v 
19.520551681518555 2018-03-23 00:00:00\n37 0.09765625 1775.5787434056401 v 19.520551681518555 2018-03-23 00:00:00\n19.520551681518555\n1788.7107849121094\ndias negociados: 37, qtd_ações: 0.09765625, dinheiro: R$ 1775.5787434056401, Total Value: R$ 1777.4850472807884, Lucro: R$ -11.225737631320953\n19.771846771240234\n1788.7107849121094\ndias negociados: 38, qtd_ações: 0.09765625, dinheiro: R$ 1775.5787434056401, Total Value: R$ 1777.509587816894, Lucro: R$ -11.20119709521532\n19.242324829101562\n1788.7107849121094\ndias negociados: 39, qtd_ações: 0.09765625, dinheiro: R$ 1775.5787434056401, Total Value: R$ 1777.457876689732, Lucro: R$ -11.2529082223773\n40 46.757283812011686 887.7893717028201 c 19.026927947998047 2018-03-28 00:00:00\n40 46.757283812011686 887.7893717028201 c 19.026927947998047 2018-03-28 00:00:00\n19.026927947998047\n1788.7107849121094\ndias negociados: 40, qtd_ações: 46.757283812011686, dinheiro: R$ 887.7893717028201, Total Value: R$ 1777.4368418380618, Lucro: R$ -11.273943074047565\n19.215402603149414\n1788.7107849121094\ndias negociados: 41, qtd_ações: 46.757283812011686, dinheiro: R$ 887.7893717028201, Total Value: R$ 1786.2494047803452, Lucro: R$ -2.4613801317641446\n42 70.42197768640332 443.89468585141003 c 18.75767707824707 2018-04-02 00:00:00\n42 70.42197768640332 443.89468585141003 c 18.75767707824707 2018-04-02 00:00:00\n18.75767707824707\n1788.7107849121094\ndias negociados: 42, qtd_ações: 70.42197768640332, dinheiro: R$ 443.89468585141003, Total Value: R$ 1764.8474025044843, Lucro: R$ -23.863382407625068\n43 82.36287843231572 221.94734292570502 c 18.5871524810791 2018-04-03 00:00:00\n43 82.36287843231572 221.94734292570502 c 18.5871524810791 2018-04-03 00:00:00\n18.5871524810791\n1788.7107849121094\ndias negociados: 43, qtd_ações: 82.36287843231572, dinheiro: R$ 221.94734292570502, Total Value: R$ 1752.8387231277386, Lucro: R$ -35.87206178437077\n44 88.43000478442913 110.97367146285251 c 18.290977478027344 2018-04-04 00:00:00\n44 88.43000478442913 110.97367146285251 c 18.290977478027344 2018-04-04 00:00:00\n18.290977478027344\n1788.7107849121094\ndias negociados: 44, qtd_ações: 88.43000478442913, dinheiro: R$ 110.97367146285251, Total Value: R$ 1728.444897356696, Lucro: R$ -60.26588755541343\n45 22.107501196107282 1369.9108733560897 v 18.982051849365234 2018-04-05 00:00:00\n45 22.107501196107282 1369.9108733560897 v 18.982051849365234 2018-04-05 00:00:00\n18.982051849365234\n1788.7107849121094\ndias negociados: 45, qtd_ações: 22.107501196107282, dinheiro: R$ 1369.9108733560897, Total Value: R$ 1789.556607320502, Lucro: R$ 0.845822408392678\n19.098726272583008\n1788.7107849121094\ndias negociados: 46, qtd_ações: 22.107501196107282, dinheiro: R$ 1369.9108733560897, Total Value: R$ 1792.1359872713442, Lucro: R$ 3.425202359234845\n18.42560386657715\n1788.7107849121094\ndias negociados: 47, qtd_ações: 22.107501196107282, dinheiro: R$ 1369.9108733560897, Total Value: R$ 1777.254932875443, Lucro: R$ -11.455852036666329\n48 5.526875299026821 1688.3654163450183 v 19.206424713134766 2018-04-10 00:00:00\n48 5.526875299026821 1688.3654163450183 v 19.206424713134766 2018-04-10 00:00:00\n19.206424713134766\n1788.7107849121094\ndias negociados: 48, qtd_ações: 5.526875299026821, dinheiro: R$ 1688.3654163450183, Total Value: R$ 1794.5169306746611, Lucro: R$ 5.8061457625517505\n49 1.3817188247567052 1769.46716016726 v 19.5654239654541 2018-04-11 00:00:00\n49 1.3817188247567052 1769.46716016726 v 19.5654239654541 2018-04-11 00:00:00\n19.5654239654541\n1788.7107849121094\ndias 
[Backtest trace, notebook cell output for trading days 49 through 271, condensed here. For each day the raw dump prints: an optional trade line "day qtd_ações dinheiro c/v price date" (every trade line appears twice verbatim in the source; duplicates dropped), the day's closing price, the constant buy-and-hold benchmark R$ 1788.7107849121094, and a summary line. Glossary: dias negociados = trading days, qtd_ações = shares held, dinheiro = cash, Lucro = profit; signal c = compra (buy), v = venda (sell). Total Value is dinheiro plus qtd_ações times the closing price, and Lucro is Total Value minus the benchmark. Representative summary lines from the span:

dias negociados: 49, qtd_ações: 1.3817188247567052, dinheiro: R$ 1769.46716016726, Total Value: R$ 1796.501074774674, Lucro: R$ 7.790289862564578
dias negociados: 81, qtd_ações: 86.7350376451675, dinheiro: R$ 6.540512820212946, Total Value: R$ 1325.5217827225133, Lucro: R$ -463.18900218959607 (2018-05-28, deepest drawdown in the span)
dias negociados: 144, qtd_ações: 0.3503904163824321, dinheiro: R$ 1639.1017793362605, Total Value: R$ 1645.0121034053577, Lucro: R$ -143.69868150675165 (2018-08-27)
dias negociados: 213, qtd_ações: 68.0476761828598, dinheiro: R$ 504.0494692746364, Total Value: R$ 2029.6947281068879, Lucro: R$ 240.98394319477848 (2018-12-07)
dias negociados: 260, qtd_ações: 73.20507419905272, dinheiro: R$ 258.5847916176036, Total Value: R$ 2135.2000969003902, Lucro: R$ 346.48931198828086 (2019-02-19, highest profit in the span)
dias negociados: 270, qtd_ações: 12.038128949978892, dinheiro: R$ 1805.6096745428033, Total Value: R$ 2106.2052821378034, Lucro: R$ 317.494497225694 (2019-03-08)

(intermediate days omitted; the trace is truncated mid-entry in the source:)
271 3.009532237494723 2040.185838483644 v
]
25.981464385986328 2019-03-11 00:00:00\n271 3.009532237494723 2040.185838483644 v 25.981464385986328 2019-03-11 00:00:00\n25.981464385986328\n1788.7107849121094\ndias negociados: 271, qtd_ações: 3.009532237494723, dinheiro: R$ 2040.185838483644, Total Value: R$ 2118.377893130591, Lucro: R$ 329.66710821848164\n272 42.628797061626166 1020.092919241822 c 25.74739646911621 2019-03-12 00:00:00\n272 42.628797061626166 1020.092919241822 c 25.74739646911621 2019-03-12 00:00:00\n25.74739646911621\n1788.7107849121094\ndias negociados: 272, qtd_ações: 42.628797061626166, dinheiro: R$ 1020.092919241822, Total Value: R$ 2117.673458189007, Lucro: R$ 328.9626732768975\n273 10.657199265406541 1861.2387475720298 v 26.309158325195312 2019-03-13 00:00:00\n273 10.657199265406541 1861.2387475720298 v 26.309158325195312 2019-03-13 00:00:00\n26.309158325195312\n1788.7107849121094\ndias negociados: 273, qtd_ações: 10.657199265406541, dinheiro: R$ 1861.2387475720298, Total Value: R$ 2141.6206903487655, Lucro: R$ 352.9099054366561\n274 2.6642998163516354 2072.1987396156323 v 26.39342498779297 2019-03-14 00:00:00\n274 2.6642998163516354 2072.1987396156323 v 26.39342498779297 2019-03-14 00:00:00\n26.39342498779297\n1788.7107849121094\ndias negociados: 274, qtd_ações: 2.6642998163516354, dinheiro: R$ 2072.1987396156323, Total Value: R$ 2142.5187369635, Lucro: R$ 353.80795205139066\n26.44959831237793\n1788.7107849121094\ndias negociados: 275, qtd_ações: 2.6642998163516354, dinheiro: R$ 2072.1987396156323, Total Value: R$ 2142.6683995418753, Lucro: R$ 353.9576146297659\n276 0.6660749540879088 2125.9677154942583 v 26.908370971679688 2019-03-18 00:00:00\n276 0.6660749540879088 2125.9677154942583 v 26.908370971679688 2019-03-18 00:00:00\n26.908370971679688\n1788.7107849121094\ndias negociados: 276, qtd_ações: 0.6660749540879088, dinheiro: R$ 2125.9677154942583, Total Value: R$ 2143.8907074538, Lucro: R$ 355.17992254169076\n277 0.1665187385219772 2139.625110854264 v 27.33905601501465 2019-03-19 00:00:00\n277 0.1665187385219772 2139.625110854264 v 27.33905601501465 2019-03-19 00:00:00\n27.33905601501465\n1788.7107849121094\ndias negociados: 277, qtd_ações: 0.1665187385219772, dinheiro: R$ 2139.625110854264, Total Value: R$ 2144.1775759742663, Lucro: R$ 355.466791062157\n278 39.60848779389899 1069.812555427132 c 27.12371063232422 2019-03-20 00:00:00\n278 39.60848779389899 1069.812555427132 c 27.12371063232422 2019-03-20 00:00:00\n27.12371063232422\n1788.7107849121094\ndias negociados: 278, qtd_ações: 39.60848779389899, dinheiro: R$ 1069.812555427132, Total Value: R$ 2144.141716932794, Lucro: R$ 355.43093202068485\n279 59.61258088061725 534.906277713566 c 26.73984146118164 2019-03-21 00:00:00\n279 59.61258088061725 534.906277713566 c 26.73984146118164 2019-03-21 00:00:00\n26.73984146118164\n1788.7107849121094\ndias negociados: 279, qtd_ações: 59.61258088061725, dinheiro: R$ 534.906277713566, Total Value: R$ 2128.937239553139, Lucro: R$ 340.2264546410297\n280 70.19252396971643 267.453138856783 c 25.279260635375977 2019-03-22 00:00:00\n280 70.19252396971643 267.453138856783 c 25.279260635375977 2019-03-22 00:00:00\n25.279260635375977\n1788.7107849121094\ndias negociados: 280, qtd_ações: 70.19252396971643, dinheiro: R$ 267.453138856783, Total Value: R$ 2041.8682469421203, Lucro: R$ 253.15746203001095\n281 75.41670844258066 133.7265694283915 c 25.597597122192383 2019-03-25 00:00:00\n281 75.41670844258066 133.7265694283915 c 25.597597122192383 2019-03-25 00:00:00\n25.597597122192383\n1788.7107849121094\ndias negociados: 281, 
qtd_ações: 75.41670844258066, dinheiro: R$ 133.7265694283915, Total Value: R$ 2064.213088423416, Lucro: R$ 275.50230351130676\n282 18.854177110645164 1649.906600166864 v 26.805377960205078 2019-03-26 00:00:00\n282 18.854177110645164 1649.906600166864 v 26.805377960205078 2019-03-26 00:00:00\n26.805377960205078\n1788.7107849121094\ndias negociados: 282, qtd_ações: 18.854177110645164, dinheiro: R$ 1649.906600166864, Total Value: R$ 2155.299943746355, Lucro: R$ 366.5891588342456\n283 51.08194037082325 824.953300083432 c 25.597597122192383 2019-03-27 00:00:00\n283 51.08194037082325 824.953300083432 c 25.597597122192383 2019-03-27 00:00:00\n25.597597122192383\n1788.7107849121094\ndias negociados: 283, qtd_ações: 51.08194037082325, dinheiro: R$ 824.953300083432, Total Value: R$ 2132.52822991562, Lucro: R$ 343.8174450035108\n284 12.770485092705812 1831.4606483823213 v 26.27170753479004 2019-03-28 00:00:00\n284 12.770485092705812 1831.4606483823213 v 26.27170753479004 2019-03-28 00:00:00\n26.27170753479004\n1788.7107849121094\ndias negociados: 284, qtd_ações: 12.770485092705812, dinheiro: R$ 1831.4606483823213, Total Value: R$ 2166.9630978152845, Lucro: R$ 378.25231290317515\n285 47.62662540937591 915.7303241911607 c 26.27170753479004 2019-03-29 00:00:00\n285 47.62662540937591 915.7303241911607 c 26.27170753479004 2019-03-29 00:00:00\n26.27170753479004\n1788.7107849121094\ndias negociados: 285, qtd_ações: 47.62662540937591, dinheiro: R$ 915.7303241911607, Total Value: R$ 2166.9630978152845, Lucro: R$ 378.25231290317515\n286 65.09204082822252 457.86516209558033 c 26.215532302856445 2019-04-01 00:00:00\n286 65.09204082822252 457.86516209558033 c 26.215532302856445 2019-04-01 00:00:00\n26.215532302856445\n1788.7107849121094\ndias negociados: 286, qtd_ações: 65.09204082822252, dinheiro: R$ 457.86516209558033, Total Value: R$ 2164.2876610866983, Lucro: R$ 375.57687617458896\n287 16.27301020705563 1750.9373164236158 v 26.487051010131836 2019-04-02 00:00:00\n287 16.27301020705563 1750.9373164236158 v 26.487051010131836 2019-04-02 00:00:00\n26.487051010131836\n1788.7107849121094\ndias negociados: 287, qtd_ações: 16.27301020705563, dinheiro: R$ 1750.9373164236158, Total Value: R$ 2181.9613678662945, Lucro: R$ 393.2505829541851\n288 50.2258424515297 875.4686582118079 c 25.784849166870117 2019-04-03 00:00:00\n288 50.2258424515297 875.4686582118079 c 25.784849166870117 2019-04-03 00:00:00\n25.784849166870117\n1788.7107849121094\ndias negociados: 288, qtd_ações: 50.2258424515297, dinheiro: R$ 875.4686582118079, Total Value: R$ 2170.5344301034834, Lucro: R$ 381.823645191374\n26.65557861328125\n1788.7107849121094\ndias negociados: 289, qtd_ações: 50.2258424515297, dinheiro: R$ 875.4686582118079, Total Value: R$ 2214.267550096836, Lucro: R$ 425.5567651847268\n290 66.47082336009768 437.73432910590395 c 26.945819854736328 2019-04-05 00:00:00\n290 66.47082336009768 437.73432910590395 c 26.945819854736328 2019-04-05 00:00:00\n26.945819854736328\n1788.7107849121094\ndias negociados: 290, qtd_ações: 66.47082336009768, dinheiro: R$ 437.73432910590395, Total Value: R$ 2228.8451609630956, Lucro: R$ 440.13437605098625\n291 16.61770584002442 1803.00513342668 v 27.385866165161133 2019-04-08 00:00:00\n291 16.61770584002442 1803.00513342668 v 27.385866165161133 2019-04-08 00:00:00\n27.385866165161133\n1788.7107849121094\ndias negociados: 291, qtd_ações: 16.61770584002442, dinheiro: R$ 1803.00513342668, Total Value: R$ 2258.0954015336056, Lucro: R$ 469.3846166214962\n292 49.63783631574953 901.50256671334 c 27.301605224609375 
2019-04-09 00:00:00\n292 49.63783631574953 901.50256671334 c 27.301605224609375 2019-04-09 00:00:00\n27.301605224609375\n1788.7107849121094\ndias negociados: 292, qtd_ações: 49.63783631574953, dinheiro: R$ 901.50256671334, Total Value: R$ 2256.6951780097124, Lucro: R$ 467.98439309760306\n293 66.36589602173174 450.75128335667 c 26.945819854736328 2019-04-10 00:00:00\n293 66.36589602173174 450.75128335667 c 26.945819854736328 2019-04-10 00:00:00\n26.945819854736328\n1788.7107849121094\ndias negociados: 293, qtd_ações: 66.36589602173174, dinheiro: R$ 450.75128335667, Total Value: R$ 2239.034762056416, Lucro: R$ 450.3239771443068\n294 74.96292312286667 225.375641678335 c 26.215532302856445 2019-04-11 00:00:00\n294 74.96292312286667 225.375641678335 c 26.215532302856445 2019-04-11 00:00:00\n26.215532302856445\n1788.7107849121094\ndias negociados: 294, qtd_ações: 74.96292312286667, dinheiro: R$ 225.375641678335, Total Value: R$ 2190.5685743223908, Lucro: R$ 401.8577894102814\n295 79.62255844570336 112.6878208391675 c 24.183828353881836 2019-04-12 00:00:00\n295 79.62255844570336 112.6878208391675 c 24.183828353881836 2019-04-12 00:00:00\n24.183828353881836\n1788.7107849121094\ndias negociados: 295, qtd_ações: 79.62255844570336, dinheiro: R$ 112.6878208391675, Total Value: R$ 2038.266107386982, Lucro: R$ 249.55532247487258\n296 19.90563961142584 1562.4628211287818 v 24.27745819091797 2019-04-15 00:00:00\n296 19.90563961142584 1562.4628211287818 v 24.27745819091797 2019-04-15 00:00:00\n24.27745819091797\n1788.7107849121094\ndias negociados: 296, qtd_ações: 19.90563961142584, dinheiro: R$ 1562.4628211287818, Total Value: R$ 2045.7211545586533, Lucro: R$ 257.01036964654395\n297 4.97640990285646 1935.9489583232564 v 25.017107009887695 2019-04-16 00:00:00\n297 4.97640990285646 1935.9489583232564 v 25.017107009887695 2019-04-16 00:00:00\n25.017107009887695\n1788.7107849121094\ndias negociados: 297, qtd_ações: 4.97640990285646, dinheiro: R$ 1935.9489583232564, Total Value: R$ 2060.4443373880813, Lucro: R$ 271.7335524759719\n298 1.244102475714115 2029.4253171207472 v 25.04519271850586 2019-04-17 00:00:00\n298 1.244102475714115 2029.4253171207472 v 25.04519271850586 2019-04-17 00:00:00\n25.04519271850586\n1788.7107849121094\ndias negociados: 298, qtd_ações: 1.244102475714115, dinheiro: R$ 2029.4253171207472, Total Value: R$ 2060.5841033865777, Lucro: R$ 271.8733184744683\n299 0.31102561892852876 2053.536978942903 v 25.84102439880371 2019-04-18 00:00:00\n299 0.31102561892852876 2053.536978942903 v 25.84102439880371 2019-04-18 00:00:00\n25.84102439880371\n1788.7107849121094\ndias negociados: 299, qtd_ações: 0.31102561892852876, dinheiro: R$ 2053.536978942903, Total Value: R$ 2061.574199550288, Lucro: R$ 272.8634146381787\n300 40.2767556931704 1026.7684894714514 c 25.69122314453125 2019-04-22 00:00:00\n300 40.2767556931704 1026.7684894714514 c 25.69122314453125 2019-04-22 00:00:00\n25.69122314453125\n1788.7107849121094\ndias negociados: 300, qtd_ações: 40.2767556931704, dinheiro: R$ 1026.7684894714514, Total Value: R$ 2061.5276075224615, Lucro: R$ 272.8168226103521\n301 10.0691889232926 1809.6255539038598 v 25.915925979614258 2019-04-23 00:00:00\n301 10.0691889232926 1809.6255539038598 v 25.915925979614258 2019-04-23 00:00:00\n25.915925979614258\n1788.7107849121094\ndias negociados: 301, qtd_ações: 10.0691889232926, dinheiro: R$ 1809.6255539038598, Total Value: R$ 2070.5779087146625, Lucro: R$ 281.8671238025531\n302 45.08377514820315 904.8127769519299 c 25.84102439880371 2019-04-24 00:00:00\n302 
45.08377514820315 904.8127769519299 c 25.84102439880371 2019-04-24 00:00:00\n25.84102439880371\n1788.7107849121094\ndias negociados: 302, qtd_ações: 45.08377514820315, dinheiro: R$ 904.8127769519299, Total Value: R$ 2069.823710546828, Lucro: R$ 281.1129256347185\n303 11.270943787050788 1784.902434463965 v 26.028274536132812 2019-04-25 00:00:00\n303 11.270943787050788 1784.902434463965 v 26.028274536132812 2019-04-25 00:00:00\n26.028274536132812\n1788.7107849121094\ndias negociados: 303, qtd_ações: 11.270943787050788, dinheiro: R$ 1784.902434463965, Total Value: R$ 2078.265653634643, Lucro: R$ 289.5548687225337\n304 46.22621431266477 892.4512172319825 c 25.531234741210938 2019-04-26 00:00:00\n304 46.22621431266477 892.4512172319825 c 25.531234741210938 2019-04-26 00:00:00\n25.531234741210938\n1788.7107849121094\ndias negociados: 304, qtd_ações: 46.22621431266477, dinheiro: R$ 892.4512172319825, Total Value: R$ 2072.6635460461516, Lucro: R$ 283.95276113404225\n305 11.556553578166193 1781.5083275205784 v 25.64366340637207 2019-04-29 00:00:00\n305 11.556553578166193 1781.5083275205784 v 25.64366340637207 2019-04-29 00:00:00\n25.64366340637207\n1788.7107849121094\ndias negociados: 305, qtd_ações: 11.556553578166193, dinheiro: R$ 1781.5083275205784, Total Value: R$ 2077.860697616777, Lucro: R$ 289.1499127046677\n306 46.625523500675065 890.7541637602892 c 25.400066375732422 2019-04-30 00:00:00\n306 46.625523500675065 890.7541637602892 c 25.400066375732422 2019-04-30 00:00:00\n25.400066375732422\n1788.7107849121094\ndias negociados: 306, qtd_ações: 46.625523500675065, dinheiro: R$ 890.7541637602892, Total Value: R$ 2075.0455554807077, Lucro: R$ 286.3347705685983\n307 64.40928791038179 445.3770818801446 c 25.04402732849121 2019-05-02 00:00:00\n307 64.40928791038179 445.3770818801446 c 25.04402732849121 2019-05-02 00:00:00\n25.04402732849121\n1788.7107849121094\ndias negociados: 307, qtd_ações: 64.40928791038179, dinheiro: R$ 445.3770818801446, Total Value: R$ 2058.4450485164048, Lucro: R$ 269.7342636042954\n308 16.102321977595448 1660.6094209698235 v 25.156461715698242 2019-05-03 00:00:00\n308 16.102321977595448 1660.6094209698235 v 25.156461715698242 2019-05-03 00:00:00\n25.156461715698242\n1788.7107849121094\ndias negociados: 308, qtd_ações: 16.102321977595448, dinheiro: R$ 1660.6094209698235, Total Value: R$ 2065.68686733305, Lucro: R$ 276.97608242094066\n309 49.20657908266635 830.3047104849118 c 25.08150863647461 2019-05-06 00:00:00\n309 49.20657908266635 830.3047104849118 c 25.08150863647461 2019-05-06 00:00:00\n25.08150863647461\n1788.7107849121094\ndias negociados: 309, qtd_ações: 49.20657908266635, dinheiro: R$ 830.3047104849118, Total Value: R$ 2064.479948718179, Lucro: R$ 275.7691638060696\n310 12.301644770666588 1741.413718643319 v 24.687999725341797 2019-05-07 00:00:00\n310 12.301644770666588 1741.413718643319 v 24.687999725341797 2019-05-07 00:00:00\n24.687999725341797\n1788.7107849121094\ndias negociados: 310, qtd_ações: 12.301644770666588, dinheiro: R$ 1741.413718643319, Total Value: R$ 2045.1167213627882, Lucro: R$ 256.40593645067884\n311 46.255719331842656 870.7068593216595 c 25.64366340637207 2019-05-08 00:00:00\n311 46.255719331842656 870.7068593216595 c 25.64366340637207 2019-05-08 00:00:00\n25.64366340637207\n1788.7107849121094\ndias negociados: 311, qtd_ações: 46.255719331842656, dinheiro: R$ 870.7068593216595, Total Value: R$ 2056.87295648705, Lucro: R$ 268.16217157494066\n312 63.574451014344035 435.35342966082976 c 25.137720108032227 2019-05-09 00:00:00\n312 
63.574451014344035 435.35342966082976 c 25.137720108032227 2019-05-09 00:00:00\n25.137720108032227\n1788.7107849121094\ndias negociados: 312, qtd_ações: 63.574451014344035, dinheiro: R$ 435.35342966082976, Total Value: R$ 2033.4701852812157, Lucro: R$ 244.7594003691063\n313 72.28250088423741 217.67671483041488 c 24.997182846069336 2019-05-10 00:00:00\n313 72.28250088423741 217.67671483041488 c 24.997182846069336 2019-05-10 00:00:00\n24.997182846069336\n1788.7107849121094\ndias negociados: 313, qtd_ações: 72.28250088423741, dinheiro: R$ 217.67671483041488, Total Value: R$ 2024.5356060048657, Lucro: R$ 235.82482109275634\n24.266382217407227\n1788.7107849121094\ndias negociados: 314, qtd_ações: 72.28250088423741, dinheiro: R$ 217.67671483041488, Total Value: R$ 1971.7115089173958, Lucro: R$ 183.00072400528643\n315 76.75039964509926 108.83835741520744 c 24.360076904296875 2019-05-14 00:00:00\n315 76.75039964509926 108.83835741520744 c 24.360076904296875 2019-05-14 00:00:00\n24.360076904296875\n1788.7107849121094\ndias negociados: 315, qtd_ações: 76.75039964509926, dinheiro: R$ 108.83835741520744, Total Value: R$ 1978.483995205345, Lucro: R$ 189.7732102932357\n316 78.99470749434983 54.41917870760372 c 24.247644424438477 2019-05-15 00:00:00\n316 78.99470749434983 54.41917870760372 c 24.247644424438477 2019-05-15 00:00:00\n24.247644424438477\n1788.7107849121094\ndias negociados: 316, qtd_ações: 78.99470749434983, dinheiro: R$ 54.41917870760372, Total Value: R$ 1969.8547574431236, Lucro: R$ 181.14397253101424\n317 80.14394955202742 27.20958935380186 c 23.676116943359375 2019-05-16 00:00:00\n317 80.14394955202742 27.20958935380186 c 23.676116943359375 2019-05-16 00:00:00\n23.676116943359375\n1788.7107849121094\ndias negociados: 317, qtd_ações: 80.14394955202742, dinheiro: R$ 27.20958935380186, Total Value: R$ 1924.7071112502974, Lucro: R$ 135.99632633818806\n318 80.73230741074353 13.60479467690093 c 23.123332977294922 2019-05-17 00:00:00\n318 80.73230741074353 13.60479467690093 c 23.123332977294922 2019-05-17 00:00:00\n23.123332977294922\n1788.7107849121094\ndias negociados: 318, qtd_ações: 80.73230741074353, dinheiro: R$ 13.60479467690093, Total Value: R$ 1880.404820960858, Lucro: R$ 91.69403604874856\n319 20.183076852685883 1461.3582534520224 v 23.91035270690918 2019-05-20 00:00:00\n319 20.183076852685883 1461.3582534520224 v 23.91035270690918 2019-05-20 00:00:00\n23.91035270690918\n1788.7107849121094\ndias negociados: 319, qtd_ações: 20.183076852685883, dinheiro: R$ 1461.3582534520224, Total Value: R$ 1943.9427397103964, Lucro: R$ 155.231954798287\n320 5.045769213171471 1837.053665071712 v 24.819169998168945 2019-05-21 00:00:00\n320 5.045769213171471 1837.053665071712 v 24.819169998168945 2019-05-21 00:00:00\n24.819169998168945\n1788.7107849121094\ndias negociados: 320, qtd_ações: 5.045769213171471, dinheiro: R$ 1837.053665071712, Total Value: R$ 1962.2854689449418, Lucro: R$ 173.57468403283247\n321 1.2614423032928677 1930.6216185340659 v 24.72512435913086 2019-05-22 00:00:00\n321 1.2614423032928677 1930.6216185340659 v 24.72512435913086 2019-05-22 00:00:00\n24.72512435913086\n1788.7107849121094\ndias negociados: 321, qtd_ações: 1.2614423032928677, dinheiro: R$ 1930.6216185340659, Total Value: R$ 1961.8109363548506, Lucro: R$ 173.1001514427412\n322 0.3153605758232169 1953.6132101019725 v 24.301908493041992 2019-05-23 00:00:00\n322 0.3153605758232169 1953.6132101019725 v 24.301908493041992 2019-05-23 00:00:00\n24.301908493041992\n1788.7107849121094\ndias negociados: 322, qtd_ações: 
0.3153605758232169, dinheiro: R$ 1953.6132101019725, Total Value: R$ 1961.2770739579414, Lucro: R$ 172.56628904583204\n323 0.07884014395580423 1959.4167179585409 v 24.537025451660156 2019-05-24 00:00:00\n323 0.07884014395580423 1959.4167179585409 v 24.537025451660156 2019-05-24 00:00:00\n24.537025451660156\n1788.7107849121094\ndias negociados: 323, qtd_ações: 0.07884014395580423, dinheiro: R$ 1959.4167179585409, Total Value: R$ 1961.351220577397, Lucro: R$ 172.64043566528767\n324 0.019710035988951057 1960.875936372259 v 24.6780948638916 2019-05-27 00:00:00\n324 0.019710035988951057 1960.875936372259 v 24.6780948638916 2019-05-27 00:00:00\n24.6780948638916\n1788.7107849121094\ndias negociados: 324, qtd_ações: 0.019710035988951057, dinheiro: R$ 1960.875936372259, Total Value: R$ 1961.362342510165, Lucro: R$ 172.65155759805566\n325 0.004927508997237764 1961.2485264959191 v 25.20476531982422 2019-05-28 00:00:00\n325 0.004927508997237764 1961.2485264959191 v 25.20476531982422 2019-05-28 00:00:00\n25.20476531982422\n1788.7107849121094\ndias negociados: 325, qtd_ações: 0.004927508997237764, dinheiro: R$ 1961.2485264959191, Total Value: R$ 1961.372723203806, Lucro: R$ 172.66193829169652\n24.922624588012695\n1788.7107849121094\ndias negociados: 326, qtd_ações: 0.004927508997237764, dinheiro: R$ 1961.2485264959191, Total Value: R$ 1961.3713329528114, Lucro: R$ 172.660548040702\n327 39.87831206308169 980.6242632479596 c 24.593454360961914 2019-05-30 00:00:00\n327 39.87831206308169 980.6242632479596 c 24.593454360961914 2019-05-30 00:00:00\n24.593454360961914\n1788.7107849121094\ndias negociados: 327, qtd_ações: 39.87831206308169, dinheiro: R$ 980.6242632479596, Total Value: R$ 1961.369710963556, Lucro: R$ 172.65892605144654\n328 60.2831861915585 490.3121316239798 c 24.02916717529297 2019-05-31 00:00:00\n328 60.2831861915585 490.3121316239798 c 24.02916717529297 2019-05-31 00:00:00\n24.02916717529297\n1788.7107849121094\ndias negociados: 328, qtd_ações: 60.2831861915585, dinheiro: R$ 490.3121316239798, Total Value: R$ 1938.8668904802516, Lucro: R$ 150.15610556814227\n329 15.070796547889625 1595.4377452003705 v 24.442981719970703 2019-06-03 00:00:00\n329 15.070796547889625 1595.4377452003705 v 24.442981719970703 2019-06-03 00:00:00\n24.442981719970703\n1788.7107849121094\ndias negociados: 329, qtd_ações: 15.070796547889625, dinheiro: R$ 1595.4377452003705, Total Value: R$ 1963.8129497258342, Lucro: R$ 175.1021648137248\n330 3.767699136972406 1873.9515129201982 v 24.64048194885254 2019-06-04 00:00:00\n330 3.767699136972406 1873.9515129201982 v 24.64048194885254 2019-06-04 00:00:00\n24.64048194885254\n1788.7107849121094\ndias negociados: 330, qtd_ações: 3.767699136972406, dinheiro: R$ 1873.9515129201982, Total Value: R$ 1966.789435493474, Lucro: R$ 178.07865058136463\n331 42.29352117461542 936.9757564600991 c 24.320720672607422 2019-06-05 00:00:00\n331 42.29352117461542 936.9757564600991 c 24.320720672607422 2019-06-05 00:00:00\n24.320720672607422\n1788.7107849121094\ndias negociados: 331, qtd_ações: 42.29352117461542, dinheiro: R$ 936.9757564600991, Total Value: R$ 1965.584671208928, Lucro: R$ 176.87388629681868\n332 10.573380293653855 1720.9617916872942 v 24.715717315673828 2019-06-06 00:00:00\n332 10.573380293653855 1720.9617916872942 v 24.715717315673828 2019-06-06 00:00:00\n24.715717315673828\n1788.7107849121094\ndias negociados: 332, qtd_ações: 10.573380293653855, dinheiro: R$ 1720.9617916872942, Total Value: R$ 1982.290470096359, Lucro: R$ 193.57968518424968\n333 2.643345073413464 
1920.5381361490308 v 25.167144775390625 2019-06-07 00:00:00\n333 2.643345073413464 1920.5381361490308 v 25.167144775390625 2019-06-07 00:00:00\n25.167144775390625\n1788.7107849121094\ndias negociados: 333, qtd_ações: 2.643345073413464, dinheiro: R$ 1920.5381361490308, Total Value: R$ 1987.0635843029431, Lucro: R$ 198.35279939083375\n334 40.956495084946546 960.2690680745154 c 25.06369400024414 2019-06-10 00:00:00\n334 40.956495084946546 960.2690680745154 c 25.06369400024414 2019-06-10 00:00:00\n25.06369400024414\n1788.7107849121094\ndias negociados: 334, qtd_ações: 40.956495084946546, dinheiro: R$ 960.2690680745154, Total Value: R$ 1986.7901282061189, Lucro: R$ 198.0793432940095\n335 10.239123771236637 1744.8931726601004 v 25.5433349609375 2019-06-11 00:00:00\n335 10.239123771236637 1744.8931726601004 v 25.5433349609375 2019-06-11 00:00:00\n25.5433349609375\n1788.7107849121094\ndias negociados: 335, qtd_ações: 10.239123771236637, dinheiro: R$ 1744.8931726601004, Total Value: R$ 2006.4345408552954, Lucro: R$ 217.723755943186\n336 44.78901839513821 872.4465863300502 c 25.251787185668945 2019-06-12 00:00:00\n336 44.78901839513821 872.4465863300502 c 25.251787185668945 2019-06-12 00:00:00\n25.251787185668945\n1788.7107849121094\ndias negociados: 336, qtd_ações: 44.78901839513821, dinheiro: R$ 872.4465863300502, Total Value: R$ 2003.449347099092, Lucro: R$ 214.73856218698256\n337 11.197254598784552 1731.1241952014616 v 25.56214714050293 2019-06-13 00:00:00\n337 11.197254598784552 1731.1241952014616 v 25.56214714050293 2019-06-13 00:00:00\n25.56214714050293\n1788.7107849121094\ndias negociados: 337, qtd_ações: 11.197254598784552, dinheiro: R$ 1731.1241952014616, Total Value: R$ 2017.3500648252655, Lucro: R$ 228.6392799131561\n338 45.20850287580046 865.5620976007308 c 25.449289321899414 2019-06-14 00:00:00\n338 45.20850287580046 865.5620976007308 c 25.449289321899414 2019-06-14 00:00:00\n25.449289321899414\n1788.7107849121094\ndias negociados: 338, qtd_ações: 45.20850287580046, dinheiro: R$ 865.5620976007308, Total Value: R$ 2016.0863670968984, Lucro: R$ 227.37558218478898\n25.496313095092773\n1788.7107849121094\ndias negociados: 339, qtd_ações: 45.20850287580046, dinheiro: R$ 865.5620976007308, Total Value: R$ 2018.2122414825415, Lucro: R$ 229.50145657043208\n340 11.302125718950116 1740.8916519446116 v 25.81607437133789 2019-06-18 00:00:00\n340 11.302125718950116 1740.8916519446116 v 25.81607437133789 2019-06-18 00:00:00\n25.81607437133789\n1788.7107849121094\ndias negociados: 340, qtd_ações: 11.302125718950116, dinheiro: R$ 1740.8916519446116, Total Value: R$ 2032.6681700592385, Lucro: R$ 243.95738514712912\n341 2.825531429737529 1960.2820728561207 v 25.881906509399414 2019-06-19 00:00:00\n341 2.825531429737529 1960.2820728561207 v 25.881906509399414 2019-06-19 00:00:00\n25.881906509399414\n1788.7107849121094\ndias negociados: 341, qtd_ações: 2.825531429737529, dinheiro: R$ 1960.2820728561207, Total Value: R$ 2033.4122131599572, Lucro: R$ 244.70142824784784\n342 0.7063828574343822 2016.6443684341148 v 26.596670150756836 2019-06-21 00:00:00\n342 0.7063828574343822 2016.6443684341148 v 26.596670150756836 2019-06-21 00:00:00\n26.596670150756836\n1788.7107849121094\ndias negociados: 342, qtd_ações: 0.7063828574343822, dinheiro: R$ 2016.6443684341148, Total Value: R$ 2035.431800293446, Lucro: R$ 246.72101538133666\n343 38.658236723413324 1008.3221842170574 c 26.568456649780273 2019-06-24 00:00:00\n343 38.658236723413324 1008.3221842170574 c 26.568456649780273 2019-06-24 
00:00:00\n26.568456649780273\n1788.7107849121094\ndias negociados: 343, qtd_ações: 38.658236723413324, dinheiro: R$ 1008.3221842170574, Total Value: R$ 2035.411870760008, Lucro: R$ 246.70108584789864\n344 58.14460364507845 504.1610921085287 c 25.87250328063965 2019-06-25 00:00:00\n344 58.14460364507845 504.1610921085287 c 25.87250328063965 2019-06-25 00:00:00\n25.87250328063965\n1788.7107849121094\ndias negociados: 344, qtd_ações: 58.14460364507845, dinheiro: R$ 504.1610921085287, Total Value: R$ 2008.507540667313, Lucro: R$ 219.79675575520355\n26.022977828979492\n1788.7107849121094\ndias negociados: 345, qtd_ações: 58.14460364507845, dinheiro: R$ 504.1610921085287, Total Value: R$ 2017.2568236392053, Lucro: R$ 228.54603872709595\n346 67.98797443121298 252.08054605426435 c 25.609169006347656 2019-06-27 00:00:00\n346 67.98797443121298 252.08054605426435 c 25.609169006347656 2019-06-27 00:00:00\n25.609169006347656\n1788.7107849121094\ndias negociados: 346, qtd_ações: 67.98797443121298, dinheiro: R$ 252.08054605426435, Total Value: R$ 1993.1960736624405, Lucro: R$ 204.48528875033116\n347 16.996993607803244 1566.5493853142498 v 25.778457641601562 2019-06-28 00:00:00\n347 16.996993607803244 1566.5493853142498 v 25.778457641601562 2019-06-28 00:00:00\n25.778457641601562\n1788.7107849121094\ndias negociados: 347, qtd_ações: 16.996993607803244, dinheiro: R$ 1566.5493853142498, Total Value: R$ 2004.7056650675781, Lucro: R$ 215.99488015546876\n348 47.54904525191867 783.2746926571249 c 25.63738441467285 2019-07-01 00:00:00\n348 47.54904525191867 783.2746926571249 c 25.63738441467285 2019-07-01 00:00:00\n25.63738441467285\n1788.7107849121094\ndias negociados: 348, qtd_ações: 47.54904525191867, dinheiro: R$ 783.2746926571249, Total Value: R$ 2002.3078443312388, Lucro: R$ 213.59705941912944\n25.223573684692383\n1788.7107849121094\ndias negociados: 349, qtd_ações: 47.54904525191867, dinheiro: R$ 783.2746926571249, Total Value: R$ 1982.6315392056679, Lucro: R$ 193.9207542935585\n350 62.870033124178335 391.63734632856244 c 25.56214714050293 2019-07-03 00:00:00\n350 62.870033124178335 391.63734632856244 c 25.56214714050293 2019-07-03 00:00:00\n25.56214714050293\n1788.7107849121094\ndias negociados: 350, qtd_ações: 62.870033124178335, dinheiro: R$ 391.63734632856244, Total Value: R$ 1998.7303837771021, Lucro: R$ 210.01959886499276\n351 15.717508281044584 1606.2696689275388 v 25.759645462036133 2019-07-04 00:00:00\n351 15.717508281044584 1606.2696689275388 v 25.759645462036133 2019-07-04 00:00:00\n25.759645462036133\n1788.7107849121094\ndias negociados: 351, qtd_ações: 15.717508281044584, dinheiro: R$ 1606.2696689275388, Total Value: R$ 2011.1471097938643, Lucro: R$ 222.4363248817549\n352 3.929377070261146 1910.0386185557838 v 25.76905059814453 2019-07-05 00:00:00\n352 3.929377070261146 1910.0386185557838 v 25.76905059814453 2019-07-05 00:00:00\n25.76905059814453\n1788.7107849121094\ndias negociados: 352, qtd_ações: 3.929377070261146, dinheiro: R$ 1910.0386185557838, Total Value: R$ 2011.2949350985323, Lucro: R$ 222.5841501864229\n353 0.9823442675652865 1986.6737589733818 v 26.004169464111328 2019-07-08 00:00:00\n353 0.9823442675652865 1986.6737589733818 v 26.004169464111328 2019-07-08 00:00:00\n26.004169464111328\n1788.7107849121094\ndias negociados: 353, qtd_ações: 0.9823442675652865, dinheiro: R$ 1986.6737589733818, Total Value: R$ 2012.2188057792478, Lucro: R$ 223.50802086713838\n354 0.24558606689132162 2006.123562499054 v 26.399168014526367 2019-07-10 00:00:00\n354 0.24558606689132162 2006.123562499054 v 
26.399168014526367 2019-07-10 00:00:00\n26.399168014526367\n1788.7107849121094\ndias negociados: 354, qtd_ações: 0.24558606689132162, dinheiro: R$ 2006.123562499054, Total Value: R$ 2012.6068303409447, Lucro: R$ 223.89604542883535\n355 0.061396516722830405 2011.0431780896297 v 26.70952606201172 2019-07-11 00:00:00\n355 0.061396516722830405 2011.0431780896297 v 26.70952606201172 2019-07-11 00:00:00\n26.70952606201172\n1788.7107849121094\ndias negociados: 355, qtd_ações: 0.061396516722830405, dinheiro: R$ 2011.0431780896297, Total Value: R$ 2012.683049953155, Lucro: R$ 223.9722650410456\n26.831790924072266\n1788.7107849121094\ndias negociados: 356, qtd_ações: 0.061396516722830405, dinheiro: R$ 2011.0431780896297, Total Value: R$ 2012.690556589803, Lucro: R$ 223.97977167769363\n357 38.001849870699154 1005.5215890448148 c 26.50262451171875 2019-07-15 00:00:00\n357 38.001849870699154 1005.5215890448148 c 26.50262451171875 2019-07-15 00:00:00\n26.50262451171875\n1788.7107849121094\ndias negociados: 357, qtd_ações: 38.001849870699154, dinheiro: R$ 1005.5215890448148, Total Value: R$ 2012.6703469186623, Lucro: R$ 223.95956200655291\n358 57.210654043674936 502.7607945224074 c 26.1734561920166 2019-07-16 00:00:00\n358 57.210654043674936 502.7607945224074 c 26.1734561920166 2019-07-16 00:00:00\n26.1734561920166\n1788.7107849121094\ndias negociados: 358, qtd_ações: 57.210654043674936, dinheiro: R$ 502.7607945224074, Total Value: R$ 2000.1613418511508, Lucro: R$ 211.45055693904146\n359 66.86710235122726 251.3803972612037 c 26.032386779785156 2019-07-17 00:00:00\n359 66.86710235122726 251.3803972612037 c 26.032386779785156 2019-07-17 00:00:00\n26.032386779785156\n1788.7107849121094\ndias negociados: 359, qtd_ações: 66.86710235122726, dinheiro: R$ 251.3803972612037, Total Value: R$ 1992.090668511833, Lucro: R$ 203.3798835997236\n360 71.72516333076362 125.69019863060186 c 25.87250328063965 2019-07-18 00:00:00\n360 71.72516333076362 125.69019863060186 c 25.87250328063965 2019-07-18 00:00:00\n25.87250328063965\n1788.7107849121094\ndias negociados: 360, qtd_ações: 71.72516333076362, dinheiro: R$ 125.69019863060186, Total Value: R$ 1981.3997222101982, Lucro: R$ 192.68893729808883\n361 74.16039039329013 62.84509931530093 c 25.806669235229492 2019-07-19 00:00:00\n361 74.16039039329013 62.84509931530093 c 25.806669235229492 2019-07-19 00:00:00\n25.806669235229492\n1788.7107849121094\ndias negociados: 361, qtd_ações: 74.16039039329013, dinheiro: R$ 62.84509931530093, Total Value: R$ 1976.67776455043, Lucro: R$ 187.9669796383207\n362 75.37578918870493 31.422549657650464 c 25.853694915771484 2019-07-22 00:00:00\n362 75.37578918870493 31.422549657650464 c 25.853694915771484 2019-07-22 00:00:00\n25.853694915771484\n1788.7107849121094\ndias negociados: 362, qtd_ações: 75.37578918870493, dinheiro: R$ 31.422549657650464, Total Value: R$ 1980.1652073779344, Lucro: R$ 191.45442246582502\n363 75.98282618666022 15.711274828825232 c 25.881906509399414 2019-07-23 00:00:00\n363 75.98282618666022 15.711274828825232 c 25.881906509399414 2019-07-23 00:00:00\n25.881906509399414\n1788.7107849121094\ndias negociados: 363, qtd_ações: 75.98282618666022, dinheiro: R$ 15.711274828825232, Total Value: R$ 1982.2916785119107, Lucro: R$ 193.58089359980136\n364 76.28823125710328 7.855637414412616 c 25.722026824951172 2019-07-24 00:00:00\n364 76.28823125710328 7.855637414412616 c 25.722026824951172 2019-07-24 00:00:00\n25.722026824951172\n1788.7107849121094\ndias negociados: 364, qtd_ações: 76.28823125710328, dinheiro: R$ 7.855637414412616, 
Total Value: R$ 1970.1435682377014, Lucro: R$ 181.43278332559203\n365 76.44354603351582 3.927818707206308 c 25.28940773010254 2019-07-25 00:00:00\n365 76.44354603351582 3.927818707206308 c 25.28940773010254 2019-07-25 00:00:00\n25.28940773010254\n1788.7107849121094\ndias negociados: 365, qtd_ações: 76.44354603351582, dinheiro: R$ 3.927818707206308, Total Value: R$ 1937.1398226836504, Lucro: R$ 148.42903777154106\n366 76.52343153908372 1.963909353603154 c 24.58405113220215 2019-07-26 00:00:00\n366 76.52343153908372 1.963909353603154 c 24.58405113220215 2019-07-26 00:00:00\n24.58405113220215\n1788.7107849121094\ndias negociados: 366, qtd_ações: 76.52343153908372, dinheiro: R$ 1.963909353603154, Total Value: R$ 1883.2198631220078, Lucro: R$ 94.50907820989846\n367 19.13085788477093 1425.8600570779474 v 24.809762954711914 2019-07-29 00:00:00\n367 19.13085788477093 1425.8600570779474 v 24.809762954711914 2019-07-29 00:00:00\n24.809762954711914\n1788.7107849121094\ndias negociados: 367, qtd_ações: 19.13085788477093, dinheiro: R$ 1425.8600570779474, Total Value: R$ 1900.4921063193956, Lucro: R$ 111.78132140728621\n368 48.02004209737058 712.9300285389737 c 24.6780948638916 2019-07-30 00:00:00\n368 48.02004209737058 712.9300285389737 c 24.6780948638916 2019-07-30 00:00:00\n24.6780948638916\n1788.7107849121094\ndias negociados: 368, qtd_ações: 48.02004209737058, dinheiro: R$ 712.9300285389737, Total Value: R$ 1897.9731827859532, Lucro: R$ 109.26239787384384\n24.527624130249023\n1788.7107849121094\ndias negociados: 369, qtd_ações: 48.02004209737058, dinheiro: R$ 712.9300285389737, Total Value: R$ 1890.7475718220144, Lucro: R$ 102.036786909905\n370 62.45913016741056 356.46501426948686 c 24.687501907348633 2019-08-01 00:00:00\n370 62.45913016741056 356.46501426948686 c 24.687501907348633 2019-08-01 00:00:00\n24.687501907348633\n1788.7107849121094\ndias negociados: 370, qtd_ações: 62.45913016741056, dinheiro: R$ 356.46501426948686, Total Value: R$ 1898.4249094087716, Lucro: R$ 109.71412449666218\n371 15.61478254185264 1524.8300804452808 v 24.941431045532227 2019-08-02 00:00:00\n371 15.61478254185264 1524.8300804452808 v 24.941431045532227 2019-08-02 00:00:00\n24.941431045532227\n1788.7107849121094\ndias negociados: 371, qtd_ações: 15.61478254185264, dinheiro: R$ 1524.8300804452808, Total Value: R$ 1914.285102503879, Lucro: R$ 125.57431759176961\n372 47.34351598736977 762.4150402226404 c 24.02916717529297 2019-08-05 00:00:00\n372 47.34351598736977 762.4150402226404 c 24.02916717529297 2019-08-05 00:00:00\n24.02916717529297\n1788.7107849121094\ndias negociados: 372, qtd_ações: 47.34351598736977, dinheiro: R$ 762.4150402226404, Total Value: R$ 1900.040300549304, Lucro: R$ 111.32951563719462\n373 11.835878996842442 1626.654066354837 v 24.33952522277832 2019-08-06 00:00:00\n373 11.835878996842442 1626.654066354837 v 24.33952522277832 2019-08-06 00:00:00\n24.33952522277832\n1788.7107849121094\ndias negociados: 373, qtd_ações: 11.835878996842442, dinheiro: R$ 1626.654066354837, Total Value: R$ 1914.7337417322358, Lucro: R$ 126.02295682012641\n374 45.617256875589604 813.3270331774185 c 24.076194763183594 2019-08-07 00:00:00\n374 45.617256875589604 813.3270331774185 c 24.076194763183594 2019-08-07 00:00:00\n24.076194763183594\n1788.7107849121094\ndias negociados: 374, qtd_ações: 45.617256875589604, dinheiro: R$ 813.3270331774185, Total Value: R$ 1911.6169942762895, Lucro: R$ 122.90620936418009\n375 62.02718661176094 406.66351658870923 c 24.781551361083984 2019-08-08 00:00:00\n375 62.02718661176094 
406.66351658870923 c 24.781551361083984 2019-08-08 00:00:00\n24.781551361083984\n1788.7107849121094\ndias negociados: 375, qtd_ações: 62.02718661176094, dinheiro: R$ 406.66351658870923, Total Value: R$ 1943.7934273916037, Lucro: R$ 155.08264247949433\n376 15.506796652940235 1556.448324225833 v 24.715717315673828 2019-08-09 00:00:00\n376 15.506796652940235 1556.448324225833 v 24.715717315673828 2019-08-09 00:00:00\n24.715717315673828\n1788.7107849121094\ndias negociados: 376, qtd_ações: 15.506796652940235, dinheiro: R$ 1556.448324225833, Total Value: R$ 1939.709926771541, Lucro: R$ 150.99914185943157\n377 47.767180316213825 778.2241621129165 c 24.123214721679688 2019-08-12 00:00:00\n377 47.767180316213825 778.2241621129165 c 24.123214721679688 2019-08-12 00:00:00\n24.123214721679688\n1788.7107849121094\ndias negociados: 377, qtd_ações: 47.767180316213825, dinheiro: R$ 778.2241621129165, Total Value: R$ 1930.522109530134, Lucro: R$ 141.81132461802463\n378 11.941795079053456 1653.993322829146 v 24.445491790771484 2019-08-13 00:00:00\n378 11.941795079053456 1653.993322829146 v 24.445491790771484 2019-08-13 00:00:00\n24.445491790771484\n1788.7107849121094\ndias negociados: 378, qtd_ações: 11.941795079053456, dinheiro: R$ 1653.993322829146, Total Value: R$ 1945.9163764012228, Lucro: R$ 157.20559148911343\n379 46.953098307752875 826.996661414573 c 23.620847702026367 2019-08-14 00:00:00\n379 46.953098307752875 826.996661414573 c 23.620847702026367 2019-08-14 00:00:00\n23.620847702026367\n1788.7107849121094\ndias negociados: 379, qtd_ações: 46.953098307752875, dinheiro: R$ 826.996661414573, Total Value: R$ 1936.0686456802757, Lucro: R$ 147.35786076816635\n380 64.95725974459818 413.4983307072865 c 22.966819763183594 2019-08-15 00:00:00\n380 64.95725974459818 413.4983307072865 c 22.966819763183594 2019-08-15 00:00:00\n22.966819763183594\n1788.7107849121094\ndias negociados: 380, qtd_ações: 64.95725974459818, dinheiro: R$ 413.4983307072865, Total Value: R$ 1905.360007571774, Lucro: R$ 116.6492226596647\n381 16.239314936149544 1517.6175576185224 v 22.663501739501953 2019-08-16 00:00:00\n381 16.239314936149544 1517.6175576185224 v 22.663501739501953 2019-08-16 00:00:00\n22.663501739501953\n1788.7107849121094\ndias negociados: 381, qtd_ações: 16.239314936149544, dinheiro: R$ 1517.6175576185224, Total Value: R$ 1885.6572999222676, Lucro: R$ 96.94651501015824\n382 49.55365094146313 758.8087788092612 c 22.777244567871094 2019-08-19 00:00:00\n382 49.55365094146313 758.8087788092612 c 22.777244567871094 2019-08-19 00:00:00\n22.777244567871094\n1788.7107849121094\ndias negociados: 382, qtd_ações: 49.55365094146313, dinheiro: R$ 758.8087788092612, Total Value: R$ 1887.5044055338826, Lucro: R$ 98.79362062177324\n22.76776695251465\n1788.7107849121094\ndias negociados: 383, qtd_ações: 49.55365094146313, dinheiro: R$ 758.8087788092612, Total Value: R$ 1887.0347550907518, Lucro: R$ 98.32397017864241\n384 12.388412735365783 1655.3538711243873 v 24.12321662902832 2019-08-21 00:00:00\n384 12.388412735365783 1655.3538711243873 v 24.12321662902832 2019-08-21 00:00:00\n24.12321662902832\n1788.7107849121094\ndias negociados: 384, qtd_ações: 12.388412735365783, dinheiro: R$ 1655.3538711243873, Total Value: R$ 1954.2022352294293, Lucro: R$ 165.49145031731996\n385 47.01170131816915 827.6769355621936 c 23.905208587646484 2019-08-22 00:00:00\n385 47.01170131816915 827.6769355621936 c 23.905208587646484 2019-08-22 00:00:00\n23.905208587646484\n1788.7107849121094\ndias negociados: 385, qtd_ações: 47.01170131816915, dinheiro: R$ 
827.6769355621936, Total Value: R$ 1951.5014616331623, Lucro: R$ 162.79067672105293\n386 64.99356572843938 413.8384677810968 c 23.01421356201172 2019-08-23 00:00:00\n386 64.99356572843938 413.8384677810968 c 23.01421356201172 2019-08-23 00:00:00\n23.01421356201172\n1788.7107849121094\ndias negociados: 386, qtd_ações: 64.99356572843938, dinheiro: R$ 413.8384677810968, Total Value: R$ 1909.6142696120464, Lucro: R$ 120.90348469993705\n387 74.10457814621039 206.9192338905484 c 22.710893630981445 2019-08-26 00:00:00\n387 74.10457814621039 206.9192338905484 c 22.710893630981445 2019-08-26 00:00:00\n22.710893630981445\n1788.7107849121094\ndias negociados: 387, qtd_ações: 74.10457814621039, dinheiro: R$ 206.9192338905484, Total Value: R$ 1889.9004257378847, Lucro: R$ 101.18964082577531\n388 18.526144536552597 1489.1739985428471 v 23.07108497619629 2019-08-27 00:00:00\n388 18.526144536552597 1489.1739985428471 v 23.07108497619629 2019-08-27 00:00:00\n23.07108497619629\n1788.7107849121094\ndias negociados: 388, qtd_ações: 18.526144536552597, dinheiro: R$ 1489.1739985428471, Total Value: R$ 1916.5922534269466, Lucro: R$ 127.88146851483725\n23.30805206298828\n1788.7107849121094\ndias negociados: 389, qtd_ações: 18.526144536552597, dinheiro: R$ 1489.1739985428471, Total Value: R$ 1920.982339927261, Lucro: R$ 132.27155501515153\n24.170610427856445\n1788.7107849121094\ndias negociados: 390, qtd_ações: 18.526144536552597, dinheiro: R$ 1489.1739985428471, Total Value: R$ 1936.962220866021, Lucro: R$ 148.2514359539116\n391 49.331613909940565 744.5869992714236 c 24.170610427856445 2019-08-30 00:00:00\n391 49.331613909940565 744.5869992714236 c 24.170610427856445 2019-08-30 00:00:00\n24.170610427856445\n1788.7107849121094\ndias negociados: 391, qtd_ações: 49.331613909940565, dinheiro: R$ 744.5869992714236, Total Value: R$ 1936.962220866021, Lucro: R$ 148.2514359539116\n392 64.85611049758093 372.2934996357118 c 23.981035232543945 2019-09-02 00:00:00\n392 64.85611049758093 372.2934996357118 c 23.981035232543945 2019-09-02 00:00:00\n23.981035232543945\n1788.7107849121094\ndias negociados: 392, qtd_ações: 64.85611049758093, dinheiro: R$ 372.2934996357118, Total Value: R$ 1927.6101705239635, Lucro: R$ 138.8993856118541\n393 16.214027624395232 1552.6130013435368 v 24.265398025512695 2019-09-03 00:00:00\n393 16.214027624395232 1552.6130013435368 v 24.265398025512695 2019-09-03 00:00:00\n24.265398025512695\n1788.7107849121094\ndias negociados: 393, qtd_ações: 16.214027624395232, dinheiro: R$ 1552.6130013435368, Total Value: R$ 1946.0528352461452, Lucro: R$ 157.34205033403578\n394 4.053506906098808 1855.3003924687239 v 24.890989303588867 2019-09-04 00:00:00\n394 4.053506906098808 1855.3003924687239 v 24.890989303588867 2019-09-04 00:00:00\n24.890989303588867\n1788.7107849121094\ndias negociados: 394, qtd_ações: 4.053506906098808, dinheiro: R$ 1855.3003924687239, Total Value: R$ 1956.196189510453, Lucro: R$ 167.48540459834362\n395 1.013376726524702 1931.3468464397313 v 25.014209747314453 2019-09-05 00:00:00\n395 1.013376726524702 1931.3468464397313 v 25.014209747314453 2019-09-05 00:00:00\n25.014209747314453\n1788.7107849121094\ndias negociados: 395, qtd_ações: 1.013376726524702, dinheiro: R$ 1931.3468464397313, Total Value: R$ 1956.6956644300672, Lucro: R$ 167.98487951795778\n396 39.42912842972014 965.6734232198656 c 25.137434005737305 2019-09-06 00:00:00\n396 39.42912842972014 965.6734232198656 c 25.137434005737305 2019-09-06 00:00:00\n25.137434005737305\n1788.7107849121094\ndias negociados: 396, qtd_ações: 
39.42912842972014, dinheiro: R$ 965.6734232198656, Total Value: R$ 1956.820537025696, Lucro: R$ 168.10975211358664\n397 58.34457051658062 482.8367116099328 c 25.526060104370117 2019-09-09 00:00:00\n397 58.34457051658062 482.8367116099328 c 25.526060104370117 2019-09-09 00:00:00\n25.526060104370117\n1788.7107849121094\ndias negociados: 397, qtd_ações: 58.34457051658062, dinheiro: R$ 482.8367116099328, Total Value: R$ 1972.1437253798304, Lucro: R$ 183.43294046772098\n398 14.586142629145154 1606.8681408811694 v 25.687198638916016 2019-09-10 00:00:00\n398 14.586142629145154 1606.8681408811694 v 25.687198638916016 2019-09-10 00:00:00\n25.687198638916016\n1788.7107849121094\ndias negociados: 398, qtd_ações: 14.586142629145154, dinheiro: R$ 1606.8681408811694, Total Value: R$ 1981.5452839715817, Lucro: R$ 192.83449905947236\n399 46.131474521960385 803.4340704405847 c 25.46919059753418 2019-09-11 00:00:00\n399 46.131474521960385 803.4340704405847 c 25.46919059753418 2019-09-11 00:00:00\n25.46919059753418\n1788.7107849121094\ndias negociados: 399, qtd_ações: 46.131474521960385, dinheiro: R$ 803.4340704405847, Total Value: R$ 1978.3653875856858, Lucro: R$ 189.6546026735764\n25.649282455444336\n1788.7107849121094\ndias negociados: 400, qtd_ações: 46.131474521960385, dinheiro: R$ 803.4340704405847, Total Value: R$ 1986.6732905404806, Lucro: R$ 197.96250562837122\n401 61.89827449503876 401.71703522029236 c 25.478666305541992 2019-09-13 00:00:00\n401 61.89827449503876 401.71703522029236 c 25.478666305541992 2019-09-13 00:00:00\n25.478666305541992\n1788.7107849121094\ndias negociados: 401, qtd_ações: 61.89827449503876, dinheiro: R$ 401.71703522029236, Total Value: R$ 1978.8025159682256, Lucro: R$ 190.09173105611626\n402 15.47456862375969 1636.4552525486279 v 26.597148895263672 2019-09-16 00:00:00\n402 15.47456862375969 1636.4552525486279 v 26.597148895263672 2019-09-16 00:00:00\n26.597148895263672\n1788.7107849121094\ndias negociados: 402, qtd_ações: 15.47456862375969, dinheiro: R$ 1636.4552525486279, Total Value: R$ 2048.0346583247397, Lucro: R$ 259.3238734126303\n403 46.64937170593763 818.2276262743139 c 26.246440887451172 2019-09-17 00:00:00\n403 46.64937170593763 818.2276262743139 c 26.246440887451172 2019-09-17 00:00:00\n26.246440887451172\n1788.7107849121094\ndias negociados: 403, qtd_ações: 46.64937170593763, dinheiro: R$ 818.2276262743139, Total Value: R$ 2042.6076031909433, Lucro: R$ 253.89681827883396\n404 62.50591996964471 409.11381313715697 c 25.80093765258789 2019-09-18 00:00:00\n404 62.50591996964471 409.11381313715697 c 25.80093765258789 2019-09-18 00:00:00\n25.80093765258789\n1788.7107849121094\ndias negociados: 404, qtd_ações: 62.50591996964471, dinheiro: R$ 409.11381313715697, Total Value: R$ 2021.8251571916085, Lucro: R$ 233.1143722794991\n405 70.41385650093642 204.55690656857848 c 25.867292404174805 2019-09-19 00:00:00\n405 70.41385650093642 204.55690656857848 c 25.867292404174805 2019-09-19 00:00:00\n25.867292404174805\n1788.7107849121094\ndias negociados: 405, qtd_ações: 70.41385650093642, dinheiro: R$ 204.55690656857848, Total Value: R$ 2025.972721983906, Lucro: R$ 237.26193707179664\n406 74.41029359871497 102.27845328428924 c 25.592409133911133 2019-09-20 00:00:00\n406 74.41029359871497 102.27845328428924 c 25.592409133911133 2019-09-20 00:00:00\n25.592409133911133\n1788.7107849121094\ndias negociados: 406, qtd_ações: 74.41029359871497, dinheiro: R$ 102.27845328428924, Total Value: R$ 2006.6171308370515, Lucro: R$ 217.90634592494212\n407 18.602573399678743 1555.9237989890642 v 
[Backtest trading log, days 407–622 (2019-09-23 through 2020-08-06), collapsed here: the raw dump printed each trade line twice and ran to several thousand lines of escaped output. Each entry records the day index, share quantity (qtd_ações), cash (dinheiro, R$), the action taken (c = compra/buy, v = venda/sell), the closing price, and the date, followed by the day's price, the initial capital of R$ 1788.7107849121094 (printed every day; Lucro is always Total Value minus this constant), and a summary line. Days with no trade print only the price, the initial capital, and the summary. Representative cleaned entries:

dias negociados: 407, qtd_ações: 18.60, dinheiro: R$ 1555.92, Total Value: R$ 2040.47, Lucro: R$ 251.76  (v @ 26.05, 2019-09-23)
dias negociados: 518, qtd_ações: 18.41, dinheiro: R$ 1103.88, Total Value: R$ 1389.72, Lucro: R$ -398.99  (v @ 15.53, 2020-03-09)
dias negociados: 521, qtd_ações: 59.38, dinheiro: R$ 568.65, Total Value: R$ 1292.41, Lucro: R$ -496.31  (c @ 12.19, 2020-03-12)
dias negociados: 621, qtd_ações: 23.24, dinheiro: R$ 1593.88, Total Value: R$ 2114.98, Lucro: R$ 326.27  (v @ 22.43, 2020-08-05)

Profit holds around R$ 200–350 through early 2020, collapses to roughly R$ -500 during the March 2020 crash, and recovers to about R$ +326 by August 2020.]
5.809234788138351 1985.3774898585884 v 22.463956832885742 2020-08-06 00:00:00\n22.463956832885742\n1788.7107849121094\ndias negociados: 622, qtd_ações: 5.809234788138351, dinheiro: R$ 1985.3774898585884, Total Value: R$ 2115.8758893714266, Lucro: R$ 327.16510445931726\n623 50.833297434332394 992.6887449292942 c 22.04796028137207 2020-08-07 00:00:00\n623 50.833297434332394 992.6887449292942 c 22.04796028137207 2020-08-07 00:00:00\n22.04796028137207\n1788.7107849121094\ndias negociados: 623, qtd_ações: 50.833297434332394, dinheiro: R$ 992.6887449292942, Total Value: R$ 2113.4592677326273, Lucro: R$ 324.7484828205179\n624 12.708324358583099 1857.6098040731094 v 22.68647003173828 2020-08-10 00:00:00\n624 12.708324358583099 1857.6098040731094 v 22.68647003173828 2020-08-10 00:00:00\n22.68647003173828\n1788.7107849121094\ndias negociados: 624, qtd_ações: 12.708324358583099, dinheiro: R$ 1857.6098040731094, Total Value: R$ 2145.9168237877147, Lucro: R$ 357.2060388756054\n625 54.305576131492316 928.8049020365547 c 22.328516006469727 2020-08-11 00:00:00\n625 54.305576131492316 928.8049020365547 c 22.328516006469727 2020-08-11 00:00:00\n22.328516006469727\n1788.7107849121094\ndias negociados: 625, qtd_ações: 54.305576131492316, dinheiro: R$ 928.8049020365547, Total Value: R$ 2141.367827929141, Lucro: R$ 352.6570430170318\n626 13.576394032873079 1853.9883222869166 v 22.715492248535156 2020-08-12 00:00:00\n626 13.576394032873079 1853.9883222869166 v 22.715492248535156 2020-08-12 00:00:00\n22.715492248535156\n1788.7107849121094\ndias negociados: 626, qtd_ações: 13.576394032873079, dinheiro: R$ 1853.9883222869166, Total Value: R$ 2162.382795703704, Lucro: R$ 373.6720107915944\n627 55.528796725113864 926.9941611434583 c 22.096330642700195 2020-08-13 00:00:00\n627 55.528796725113864 926.9941611434583 c 22.096330642700195 2020-08-13 00:00:00\n22.096330642700195\n1788.7107849121094\ndias negociados: 627, qtd_ações: 55.528796725113864, dinheiro: R$ 926.9941611434583, Total Value: R$ 2153.976813772862, Lucro: R$ 365.2660288607526\n628 76.66229658681813 463.49708057172916 c 21.931865692138672 2020-08-14 00:00:00\n628 76.66229658681813 463.49708057172916 c 21.931865692138672 2020-08-14 00:00:00\n21.931865692138672\n1788.7107849121094\ndias negociados: 628, qtd_ações: 76.66229658681813, dinheiro: R$ 463.49708057172916, Total Value: R$ 2144.8442729647254, Lucro: R$ 356.13348805261603\n629 87.26177520028986 231.74854028586458 c 21.864145278930664 2020-08-17 00:00:00\n629 87.26177520028986 231.74854028586458 c 21.864145278930664 2020-08-17 00:00:00\n21.864145278930664\n1788.7107849121094\ndias negociados: 629, qtd_ações: 87.26177520028986, dinheiro: R$ 231.74854028586458, Total Value: R$ 2139.652670562391, Lucro: R$ 350.9418856502816\n630 21.815443800072465 1689.2692032834261 v 22.270471572875977 2020-08-18 00:00:00\n630 21.815443800072465 1689.2692032834261 v 22.270471572875977 2020-08-18 00:00:00\n22.270471572875977\n1788.7107849121094\ndias negociados: 630, qtd_ações: 21.815443800072465, dinheiro: R$ 1689.2692032834261, Total Value: R$ 2175.1094242826134, Lucro: R$ 386.39863937050404\n631 5.453860950018116 2051.5915946577597 v 22.144702911376953 2020-08-19 00:00:00\n631 5.453860950018116 2051.5915946577597 v 22.144702911376953 2020-08-19 00:00:00\n22.144702911376953\n1788.7107849121094\ndias negociados: 631, qtd_ações: 5.453860950018116, dinheiro: R$ 2051.5915946577597, Total Value: R$ 2172.365725115871, Lucro: R$ 383.6549402037617\n632 52.10233302147066 1025.7957973288799 c 21.989912033081055 2020-08-20 
00:00:00\n632 52.10233302147066 1025.7957973288799 c 21.989912033081055 2020-08-20 00:00:00\n21.989912033081055\n1788.7107849121094\ndias negociados: 632, qtd_ações: 52.10233302147066, dinheiro: R$ 1025.7957973288799, Total Value: R$ 2171.521517189314, Lucro: R$ 382.8107322772048\n633 75.60232808343807 512.8978986644399 c 21.82544708251953 2020-08-21 00:00:00\n633 75.60232808343807 512.8978986644399 c 21.82544708251953 2020-08-21 00:00:00\n21.82544708251953\n1788.7107849121094\ndias negociados: 633, qtd_ações: 75.60232808343807, dinheiro: R$ 512.8978986644399, Total Value: R$ 2162.9525095647978, Lucro: R$ 374.2417246526884\n634 87.14259562670628 256.44894933221997 c 22.222097396850586 2020-08-24 00:00:00\n634 87.14259562670628 256.44894933221997 c 22.222097396850586 2020-08-24 00:00:00\n22.222097396850586\n1788.7107849121094\ndias negociados: 634, qtd_ações: 87.14259562670628, dinheiro: R$ 256.44894933221997, Total Value: R$ 2192.940196763253, Lucro: R$ 404.2294118511436\n635 21.78564890667657 1702.4945819907098 v 22.125354766845703 2020-08-25 00:00:00\n635 21.78564890667657 1702.4945819907098 v 22.125354766845703 2020-08-25 00:00:00\n22.125354766845703\n1788.7107849121094\ndias negociados: 635, qtd_ações: 21.78564890667657, dinheiro: R$ 1702.4945819907098, Total Value: R$ 2184.5097928768732, Lucro: R$ 395.79900796476386\n636 5.446412226669143 2053.7312640133932 v 21.496517181396484 2020-08-26 00:00:00\n636 5.446412226669143 2053.7312640133932 v 21.496517181396484 2020-08-26 00:00:00\n21.496517181396484\n1788.7107849121094\ndias negociados: 636, qtd_ações: 5.446412226669143, dinheiro: R$ 2053.7312640133932, Total Value: R$ 2170.8101580209545, Lucro: R$ 382.09937310884516\n637 53.366304468640536 1026.8656320066966 c 21.428796768188477 2020-08-27 00:00:00\n637 53.366304468640536 1026.8656320066966 c 21.428796768188477 2020-08-27 00:00:00\n21.428796768188477\n1788.7107849121094\ndias negociados: 637, qtd_ações: 53.366304468640536, dinheiro: R$ 1026.8656320066966, Total Value: R$ 2170.4413247344633, Lucro: R$ 381.7305398223539\n638 13.341576117160134 1899.648894746294 v 21.806100845336914 2020-08-28 00:00:00\n638 13.341576117160134 1899.648894746294 v 21.806100845336914 2020-08-28 00:00:00\n21.806100845336914\n1788.7107849121094\ndias negociados: 638, qtd_ações: 13.341576117160134, dinheiro: R$ 1899.648894746294, Total Value: R$ 2190.5766489928264, Lucro: R$ 401.8658640807171\n639 58.19271838785333 949.824447373147 c 21.177263259887695 2020-08-31 00:00:00\n639 58.19271838785333 949.824447373147 c 21.177263259887695 2020-08-31 00:00:00\n21.177263259887695\n1788.7107849121094\ndias negociados: 639, qtd_ações: 58.19271838785333, dinheiro: R$ 949.824447373147, Total Value: R$ 2182.1869644812245, Lucro: R$ 393.4761795691152\n640 14.548179596963333 1915.475351756947 v 22.125354766845703 2020-09-01 00:00:00\n640 14.548179596963333 1915.475351756947 v 22.125354766845703 2020-09-01 00:00:00\n22.125354766845703\n1788.7107849121094\ndias negociados: 640, qtd_ações: 14.548179596963333, dinheiro: R$ 1915.475351756947, Total Value: R$ 2237.358986551547, Lucro: R$ 448.6482016394375\n641 57.96796538814428 957.7376758784735 c 22.057632446289062 2020-09-02 00:00:00\n641 57.96796538814428 957.7376758784735 c 22.057632446289062 2020-09-02 00:00:00\n22.057632446289062\n1788.7107849121094\ndias negociados: 641, qtd_ações: 57.96796538814428, dinheiro: R$ 957.7376758784735, Total Value: R$ 2236.373750069366, Lucro: R$ 447.6629651572566\n642 14.49199134703607 1920.0796150868591 v 22.135028839111328 2020-09-03 
00:00:00\n642 14.49199134703607 1920.0796150868591 v 22.135028839111328 2020-09-03 00:00:00\n22.135028839111328\n1788.7107849121094\ndias negociados: 642, qtd_ações: 14.49199134703607, dinheiro: R$ 1920.0796150868591, Total Value: R$ 2240.8602614896545, Lucro: R$ 452.1494765775451\n643 57.788267431135026 960.0398075434296 c 22.17372703552246 2020-09-04 00:00:00\n643 57.788267431135026 960.0398075434296 c 22.17372703552246 2020-09-04 00:00:00\n22.17372703552246\n1788.7107849121094\ndias negociados: 643, qtd_ações: 57.788267431135026, dinheiro: R$ 960.0398075434296, Total Value: R$ 2241.4210754171904, Lucro: R$ 452.71029050508105\n644 80.07826524868102 480.0199037717148 c 21.535215377807617 2020-09-08 00:00:00\n644 80.07826524868102 480.0199037717148 c 21.535215377807617 2020-09-08 00:00:00\n21.535215377807617\n1788.7107849121094\ndias negociados: 644, qtd_ações: 80.07826524868102, dinheiro: R$ 480.0199037717148, Total Value: R$ 2204.5225929832677, Lucro: R$ 415.81180807115834\n645 20.019566312170255 1800.7054102068853 v 21.989912033081055 2020-09-09 00:00:00\n645 20.019566312170255 1800.7054102068853 v 21.989912033081055 2020-09-09 00:00:00\n21.989912033081055\n1788.7107849121094\ndias negociados: 645, qtd_ações: 20.019566312170255, dinheiro: R$ 1800.7054102068853, Total Value: R$ 2240.933912351942, Lucro: R$ 452.22312743983275\n646 62.09256586369561 900.3527051034426 c 21.3997745513916 2020-09-10 00:00:00\n646 62.09256586369561 900.3527051034426 c 21.3997745513916 2020-09-10 00:00:00\n21.3997745513916\n1788.7107849121094\ndias negociados: 646, qtd_ações: 62.09256586369561, dinheiro: R$ 900.3527051034426, Total Value: R$ 2229.1196159039628, Lucro: R$ 440.4088309918534\n647 83.35981501170083 450.1763525517213 c 21.167587280273438 2020-09-11 00:00:00\n647 83.35981501170083 450.1763525517213 c 21.167587280273438 2020-09-11 00:00:00\n21.167587280273438\n1788.7107849121094\ndias negociados: 647, qtd_ações: 83.35981501170083, dinheiro: R$ 450.1763525517213, Total Value: R$ 2214.7025124793468, Lucro: R$ 425.9917275672374\n648 94.09153532695098 225.08817627586066 c 20.97410011291504 2020-09-14 00:00:00\n648 94.09153532695098 225.08817627586066 c 20.97410011291504 2020-09-14 00:00:00\n20.97410011291504\n1788.7107849121094\ndias negociados: 648, qtd_ações: 94.09153532695098, dinheiro: R$ 225.08817627586066, Total Value: R$ 2198.5734580012127, Lucro: R$ 409.8626730891033\n649 23.522883831737744 1704.519451335622 v 20.964426040649414 2020-09-15 00:00:00\n649 23.522883831737744 1704.519451335622 v 20.964426040649414 2020-09-15 00:00:00\n20.964426040649414\n1788.7107849121094\ndias negociados: 649, qtd_ações: 23.522883831737744, dinheiro: R$ 1704.519451335622, Total Value: R$ 2197.663209688876, Lucro: R$ 408.95242477676675\n650 5.880720957934436 2075.4013331016963 v 21.022472381591797 2020-09-16 00:00:00\n650 5.880720957934436 2075.4013331016963 v 21.022472381591797 2020-09-16 00:00:00\n21.022472381591797\n1788.7107849121094\ndias negociados: 650, qtd_ações: 5.880720957934436, dinheiro: R$ 2075.4013331016963, Total Value: R$ 2199.028627023721, Lucro: R$ 410.31784211161175\n651 1.470180239483609 2169.9139137951993 v 21.428796768188477 2020-09-17 00:00:00\n651 1.470180239483609 2169.9139137951993 v 21.428796768188477 2020-09-17 00:00:00\n21.428796768188477\n1788.7107849121094\ndias negociados: 651, qtd_ações: 1.470180239483609, dinheiro: R$ 2169.9139137951993, Total Value: R$ 2201.4181073597, Lucro: R$ 412.70732244759074\n652 53.27027199749417 1084.9569568975996 c 20.945077896118164 2020-09-18 
00:00:00\n652 53.27027199749417 1084.9569568975996 c 20.945077896118164 2020-09-18 00:00:00\n20.945077896118164\n1788.7107849121094\ndias negociados: 652, qtd_ações: 53.27027199749417, dinheiro: R$ 1084.9569568975996, Total Value: R$ 2200.7069534325174, Lucro: R$ 411.996168520408\n653 80.09974555892556 542.4784784487998 c 20.219497680664062 2020-09-21 00:00:00\n653 80.09974555892556 542.4784784487998 c 20.219497680664062 2020-09-21 00:00:00\n20.219497680664062\n1788.7107849121094\ndias negociados: 653, qtd_ações: 80.09974555892556, dinheiro: R$ 542.4784784487998, Total Value: R$ 2162.0550979992768, Lucro: R$ 373.3443130871674\n654 93.57897662103143 271.2392392243999 c 20.122753143310547 2020-09-22 00:00:00\n654 93.57897662103143 271.2392392243999 c 20.122753143310547 2020-09-22 00:00:00\n20.122753143310547\n1788.7107849121094\ndias negociados: 654, qtd_ações: 93.57897662103143, dinheiro: R$ 271.2392392243999, Total Value: R$ 2154.3058851730443, Lucro: R$ 365.59510026093494\n655 100.50848752729763 135.61961961219995 c 19.571311950683594 2020-09-23 00:00:00\n655 100.50848752729763 135.61961961219995 c 19.571311950683594 2020-09-23 00:00:00\n19.571311950683594\n1788.7107849121094\ndias negociados: 655, qtd_ações: 100.50848752729763, dinheiro: R$ 135.61961961219995, Total Value: R$ 2102.702582700333, Lucro: R$ 313.9917977882237\n656 25.127121881824408 1623.3294345024426 v 19.735776901245117 2020-09-24 00:00:00\n656 25.127121881824408 1623.3294345024426 v 19.735776901245117 2020-09-24 00:00:00\n19.735776901245117\n1788.7107849121094\ndias negociados: 656, qtd_ações: 25.127121881824408, dinheiro: R$ 1623.3294345024426, Total Value: R$ 2119.2327061325236, Lucro: R$ 330.52192122041424\n657 66.80531169854135 811.6647172512213 c 19.474567413330078 2020-09-25 00:00:00\n657 66.80531169854135 811.6647172512213 c 19.474567413330078 2020-09-25 00:00:00\n19.474567413330078\n1788.7107849121094\ndias negociados: 657, qtd_ações: 66.80531169854135, dinheiro: R$ 811.6647172512213, Total Value: R$ 2112.669263492993, Lucro: R$ 323.9584785808838\n18.990848541259766\n1788.7107849121094\ndias negociados: 658, qtd_ações: 66.80531169854135, dinheiro: R$ 811.6647172512213, Total Value: R$ 2080.354273469869, Lucro: R$ 291.6434885577596\n659 88.52933948014147 405.83235862561065 c 18.68126678466797 2020-09-29 00:00:00\n659 88.52933948014147 405.83235862561065 c 18.68126678466797 2020-09-29 00:00:00\n18.68126678466797\n1788.7107849121094\ndias negociados: 659, qtd_ações: 88.52933948014147, dinheiro: R$ 405.83235862561065, Total Value: R$ 2059.672567724572, Lucro: R$ 270.96178281246284\n660 22.13233487003537 1665.4831579278236 v 18.971500396728516 2020-09-30 00:00:00\n660 22.13233487003537 1665.4831579278236 v 18.971500396728516 2020-09-30 00:00:00\n18.971500396728516\n1788.7107849121094\ndias negociados: 660, qtd_ações: 22.13233487003537, dinheiro: R$ 1665.4831579278236, Total Value: R$ 2085.366757695228, Lucro: R$ 296.6559727831186\n661 5.533083717508842 1984.249960920528 v 19.203685760498047 2020-10-01 00:00:00\n661 5.533083717508842 1984.249960920528 v 19.203685760498047 2020-10-01 00:00:00\n19.203685760498047\n1788.7107849121094\ndias negociados: 661, qtd_ações: 5.533083717508842, dinheiro: R$ 1984.249960920528, Total Value: R$ 2090.505561918096, Lucro: R$ 301.7947770059868\n662 1.3832709293772105 2060.609466986248 v 18.400711059570312 2020-10-02 00:00:00\n662 1.3832709293772105 2060.609466986248 v 18.400711059570312 2020-10-02 00:00:00\n18.400711059570312\n1788.7107849121094\ndias negociados: 662, qtd_ações: 
1.3832709293772105, dinheiro: R$ 2060.609466986248, Total Value: R$ 2086.0626356748216, Lucro: R$ 297.3518507627123\n663 0.34581773234430263 2080.7130532592523 v 19.377824783325195 2020-10-05 00:00:00\n663 0.34581773234430263 2080.7130532592523 v 19.377824783325195 2020-10-05 00:00:00\n19.377824783325195\n1788.7107849121094\ndias negociados: 663, qtd_ações: 0.34581773234430263, dinheiro: R$ 2080.7130532592523, Total Value: R$ 2087.414248683587, Lucro: R$ 298.7034637714778\n664 54.30319529405969 1040.3565266296262 c 19.28108024597168 2020-10-06 00:00:00\n664 54.30319529405969 1040.3565266296262 c 19.28108024597168 2020-10-06 00:00:00\n19.28108024597168\n1788.7107849121094\ndias negociados: 664, qtd_ações: 54.30319529405969, dinheiro: R$ 1040.3565266296262, Total Value: R$ 2087.380792707063, Lucro: R$ 298.67000779495356\n665 81.4727377635316 520.1782633148131 c 19.145639419555664 2020-10-07 00:00:00\n665 81.4727377635316 520.1782633148131 c 19.145639419555664 2020-10-07 00:00:00\n19.145639419555664\n1788.7107849121094\ndias negociados: 665, qtd_ações: 81.4727377635316, dinheiro: R$ 520.1782633148131, Total Value: R$ 2080.025923059405, Lucro: R$ 291.3151381472958\n666 20.3681844408829 1728.488731346938 v 19.77447509765625 2020-10-08 00:00:00\n666 20.3681844408829 1728.488731346938 v 19.77447509765625 2020-10-08 00:00:00\n19.77447509765625\n1788.7107849121094\ndias negociados: 666, qtd_ações: 20.3681844408829, dinheiro: R$ 1728.488731346938, Total Value: R$ 2131.2588873576465, Lucro: R$ 342.5481024455371\n19.15531349182129\n1788.7107849121094\ndias negociados: 667, qtd_ações: 20.3681844408829, dinheiro: R$ 1728.488731346938, Total Value: R$ 2118.6476895712867, Lucro: R$ 329.93690465917734\n668 5.092046110220725 2025.9849170827742 v 19.474567413330078 2020-10-13 00:00:00\n668 5.092046110220725 2025.9849170827742 v 19.474567413330078 2020-10-13 00:00:00\n19.474567413330078\n1788.7107849121094\ndias negociados: 668, qtd_ações: 5.092046110220725, dinheiro: R$ 2025.9849170827742, Total Value: R$ 2125.150312328053, Lucro: R$ 336.43952741594376\n669 57.524969270412484 1012.9924585413871 c 19.319778442382812 2020-10-14 00:00:00\n669 57.524969270412484 1012.9924585413871 c 19.319778442382812 2020-10-14 00:00:00\n19.319778442382812\n1788.7107849121094\ndias negociados: 669, qtd_ações: 57.524969270412484, dinheiro: R$ 1012.9924585413871, Total Value: R$ 2124.362119750636, Lucro: R$ 335.6513348385265\n670 84.03346287781244 506.49622927069356 c 19.10694122314453 2020-10-15 00:00:00\n670 84.03346287781244 506.49622927069356 c 19.10694122314453 2020-10-15 00:00:00\n19.10694122314453\n1788.7107849121094\ndias negociados: 670, qtd_ações: 84.03346287781244, dinheiro: R$ 506.49622927069356, Total Value: R$ 2112.118665254354, Lucro: R$ 323.4078803422444\n671 97.57569610192766 253.24811463534678 c 18.70061683654785 2020-10-16 00:00:00\n671 97.57569610192766 253.24811463534678 c 18.70061683654785 2020-10-16 00:00:00\n18.70061683654785\n1788.7107849121094\ndias negociados: 671, qtd_ações: 97.57569610192766, dinheiro: R$ 253.24811463534678, Total Value: R$ 2077.9738199969315, Lucro: R$ 289.26303508482215\n672 24.393924025481915 1635.2441616862811 v 18.884429931640625 2020-10-19 00:00:00\n672 24.393924025481915 1635.2441616862811 v 18.884429931640625 2020-10-19 00:00:00\n18.884429931640625\n1788.7107849121094\ndias negociados: 672, qtd_ações: 24.393924025481915, dinheiro: R$ 1635.2441616862811, Total Value: R$ 2095.909510703259, Lucro: R$ 307.1987257911496\n673 66.27398995207027 817.6220808431406 c 19.52294158935547 
2020-10-20 00:00:00\n673 66.27398995207027 817.6220808431406 c 19.52294158935547 2020-10-20 00:00:00\n19.52294158935547\n1788.7107849121094\ndias negociados: 673, qtd_ações: 66.27398995207027, dinheiro: R$ 817.6220808431406, Total Value: R$ 2111.4853155709397, Lucro: R$ 322.7745306588304\n674 16.568497488017567 1787.0577030310951 v 19.503591537475586 2020-10-21 00:00:00\n674 16.568497488017567 1787.0577030310951 v 19.503591537475586 2020-10-21 00:00:00\n19.503591537475586\n1788.7107849121094\ndias negociados: 674, qtd_ações: 16.568497488017567, dinheiro: R$ 1787.0577030310951, Total Value: R$ 2110.20291042708, Lucro: R$ 321.49212551497067\n675 60.88717457428146 893.5288515155476 c 20.16145133972168 2020-10-22 00:00:00\n675 60.88717457428146 893.5288515155476 c 20.16145133972168 2020-10-22 00:00:00\n20.16145133972168\n1788.7107849121094\ndias negociados: 675, qtd_ações: 60.88717457428146, dinheiro: R$ 893.5288515155476, Total Value: R$ 2121.102658908062, Lucro: R$ 332.39187399595266\n676 15.221793643570365 1802.2809762915774 v 19.90024185180664 2020-10-23 00:00:00\n676 15.221793643570365 1802.2809762915774 v 19.90024185180664 2020-10-23 00:00:00\n19.90024185180664\n1788.7107849121094\ndias negociados: 676, qtd_ações: 15.221793643570365, dinheiro: R$ 1802.2809762915774, Total Value: R$ 2105.198351216921, Lucro: R$ 316.4875663048115\n677 61.22026414078341 901.1404881457887 c 19.590662002563477 2020-10-26 00:00:00\n677 61.22026414078341 901.1404881457887 c 19.590662002563477 2020-10-26 00:00:00\n19.590662002563477\n1788.7107849121094\ndias negociados: 677, qtd_ações: 61.22026414078341, dinheiro: R$ 901.1404881457887, Total Value: R$ 2100.4859906355337, Lucro: R$ 311.7752057234243\n678 84.64755501447695 450.57024407289435 c 19.232707977294922 2020-10-27 00:00:00\n678 84.64755501447695 450.57024407289435 c 19.232707977294922 2020-10-27 00:00:00\n19.232707977294922\n1788.7107849121094\ndias negociados: 678, qtd_ações: 84.64755501447695, dinheiro: R$ 450.57024407289435, Total Value: R$ 2078.571950658336, Lucro: R$ 289.86116574622656\n679 97.12035904345986 225.28512203644718 c 18.06210708618164 2020-10-28 00:00:00\n679 97.12035904345986 225.28512203644718 c 18.06210708618164 2020-10-28 00:00:00\n18.06210708618164\n1788.7107849121094\ndias negociados: 679, qtd_ações: 97.12035904345986, dinheiro: R$ 225.28512203644718, Total Value: R$ 1979.4834473278286, Lucro: R$ 190.7726624157192\n680 103.15631663590614 112.64256101822359 c 18.66192054748535 2020-10-29 00:00:00\n680 103.15631663590614 112.64256101822359 c 18.66192054748535 2020-10-29 00:00:00\n18.66192054748535\n1788.7107849121094\ndias negociados: 680, qtd_ações: 103.15631663590614, dinheiro: R$ 112.64256101822359, Total Value: R$ 2037.7375460487453, Lucro: R$ 249.02676113663597\n681 25.789079158976534 1530.266798205027 v 18.323314666748047 2020-10-30 00:00:00\n681 25.789079158976534 1530.266798205027 v 18.323314666748047 2020-10-30 00:00:00\n18.323314666748047\n1788.7107849121094\ndias negociados: 681, qtd_ações: 25.789079158976534, dinheiro: R$ 1530.266798205027, Total Value: R$ 2002.8082106006282, Lucro: R$ 214.0974256885188\n682 6.447269789744134 1897.9583985732104 v 19.010196685791016 2020-11-03 00:00:00\n682 6.447269789744134 1897.9583985732104 v 19.010196685791016 2020-11-03 00:00:00\n19.010196685791016\n1788.7107849121094\ndias negociados: 682, qtd_ações: 6.447269789744134, dinheiro: R$ 1897.9583985732104, Total Value: R$ 2020.522265362605, Lucro: R$ 231.81148045049554\n19.077917098999023\n1788.7107849121094\ndias negociados: 683, 
qtd_ações: 6.447269789744134, dinheiro: R$ 1897.9583985732104, Total Value: R$ 2020.9588771368299, Lucro: R$ 232.2480922247205\n684 1.6118174474360334 1991.0040199263453 v 19.242382049560547 2020-11-05 00:00:00\n684 1.6118174474360334 1991.0040199263453 v 19.242382049560547 2020-11-05 00:00:00\n19.242382049560547\n1788.7107849121094\ndias negociados: 684, qtd_ações: 1.6118174474360334, dinheiro: R$ 1991.0040199263453, Total Value: R$ 2022.019227044057, Lucro: R$ 233.3084421319477\n685 53.71340703924918 995.5020099631727 c 19.10694122314453 2020-11-06 00:00:00\n685 53.71340703924918 995.5020099631727 c 19.10694122314453 2020-11-06 00:00:00\n19.10694122314453\n1788.7107849121094\ndias negociados: 685, qtd_ações: 53.71340703924918, dinheiro: R$ 995.5020099631727, Total Value: R$ 2021.8009211569445, Lucro: R$ 233.0901362448351\n686 13.428351759812294 1837.7166718587673 v 20.90637969970703 2020-11-09 00:00:00\n686 13.428351759812294 1837.7166718587673 v 20.90637969970703 2020-11-09 00:00:00\n20.90637969970703\n1788.7107849121094\ndias negociados: 686, qtd_ações: 13.428351759812294, dinheiro: R$ 1837.7166718587673, Total Value: R$ 2118.454892490632, Lucro: R$ 329.74410757852274\n687 54.58013881378144 918.8583359293837 c 22.328516006469727 2020-11-10 00:00:00\n687 54.58013881378144 918.8583359293837 c 22.328516006469727 2020-11-10 00:00:00\n22.328516006469727\n1788.7107849121094\ndias negociados: 687, qtd_ações: 54.58013881378144, dinheiro: R$ 918.8583359293837, Total Value: R$ 2137.551839068242, Lucro: R$ 348.8410541561325\n688 75.33589076260685 459.4291679646918 c 22.135028839111328 2020-11-11 00:00:00\n688 75.33589076260685 459.4291679646918 c 22.135028839111328 2020-11-11 00:00:00\n22.135028839111328\n1788.7107849121094\ndias negociados: 688, qtd_ações: 75.33589076260685, dinheiro: R$ 459.4291679646918, Total Value: R$ 2126.9912826151353, Lucro: R$ 338.28049770302596\n689 86.17321657848969 229.7145839823459 c 21.196611404418945 2020-11-12 00:00:00\n689 86.17321657848969 229.7145839823459 c 21.196611404418945 2020-11-12 00:00:00\n21.196611404418945\n1788.7107849121094\ndias negociados: 689, qtd_ações: 86.17321657848969, dinheiro: R$ 229.7145839823459, Total Value: R$ 2056.2947692654243, Lucro: R$ 267.5839843533149\n690 21.54330414462242 1644.6680821312057 v 21.89316749572754 2020-11-13 00:00:00\n690 21.54330414462242 1644.6680821312057 v 21.89316749572754 2020-11-13 00:00:00\n21.89316749572754\n1788.7107849121094\ndias negociados: 690, qtd_ações: 21.54330414462242, dinheiro: R$ 1644.6680821312057, Total Value: R$ 2116.3192481808255, Lucro: R$ 327.60846326871615\n691 5.385826036155605 2008.723194799949 v 22.531679153442383 2020-11-16 00:00:00\n691 5.385826036155605 2008.723194799949 v 22.531679153442383 2020-11-16 00:00:00\n22.531679153442383\n1788.7107849121094\ndias negociados: 691, qtd_ações: 5.385826036155605, dinheiro: R$ 2008.723194799949, Total Value: R$ 2130.0748990228635, Lucro: R$ 341.36411411075414\n22.918655395507812\n1788.7107849121094\ndias negociados: 692, qtd_ações: 5.385826036155605, dinheiro: R$ 2008.723194799949, Total Value: R$ 2132.159085742753, Lucro: R$ 343.4483008306438\n693 49.46923131843461 1004.3615973999745 c 22.783212661743164 2020-11-18 00:00:00\n693 49.46923131843461 1004.3615973999745 c 22.783212661743164 2020-11-18 00:00:00\n22.783212661743164\n1788.7107849121094\ndias negociados: 693, qtd_ações: 49.46923131843461, dinheiro: R$ 1004.3615973999745, Total Value: R$ 2131.4296147408354, Lucro: R$ 342.71882982872603\n694 71.26109033918405 502.18079869998724 c 
23.044422149658203 2020-11-19 00:00:00\n694 71.26109033918405 502.18079869998724 c 23.044422149658203 2020-11-19 00:00:00\n23.044422149658203\n1788.7107849121094\ndias negociados: 694, qtd_ações: 71.26109033918405, dinheiro: R$ 502.18079869998724, Total Value: R$ 2144.3514473210744, Lucro: R$ 355.640662408965\n695 17.815272584796013 1799.9919818577416 v 24.282745361328125 2020-11-23 00:00:00\n695 17.815272584796013 1799.9919818577416 v 24.282745361328125 2020-11-23 00:00:00\n24.282745361328125\n1788.7107849121094\ndias negociados: 695, qtd_ações: 17.815272584796013, dinheiro: R$ 1799.9919818577416, Total Value: R$ 2232.595709576993, Lucro: R$ 443.8849246648838\n696 4.453818146199003 2138.922345552304 v 25.36627769470215 2020-11-24 00:00:00\n696 4.453818146199003 2138.922345552304 v 25.36627769470215 2020-11-24 00:00:00\n25.36627769470215\n1788.7107849121094\ndias negociados: 696, qtd_ações: 4.453818146199003, dinheiro: R$ 2138.922345552304, Total Value: R$ 2251.8991334504913, Lucro: R$ 463.1883485383819\n697 1.1134545365497508 2223.751887604042 v 25.395301818847656 2020-11-25 00:00:00\n697 1.1134545365497508 2223.751887604042 v 25.395301818847656 2020-11-25 00:00:00\n25.395301818847656\n1788.7107849121094\ndias negociados: 697, qtd_ações: 1.1134545365497508, dinheiro: R$ 2223.751887604042, Total Value: R$ 2252.028401621288, Lucro: R$ 463.31761670917876\n698 45.625345548003786 1111.875943802021 c 24.97930145263672 2020-11-26 00:00:00\n698 45.625345548003786 1111.875943802021 c 24.97930145263672 2020-11-26 00:00:00\n24.97930145263672\n1788.7107849121094\ndias negociados: 698, qtd_ações: 45.625345548003786, dinheiro: R$ 1111.875943802021, Total Value: R$ 2251.5652041263243, Lucro: R$ 462.8544192142149\n699 68.16058047438966 555.9379719010105 c 24.669721603393555 2020-11-27 00:00:00\n699 68.16058047438966 555.9379719010105 c 24.669721603393555 2020-11-27 00:00:00\n24.669721603393555\n1788.7107849121094\ndias negociados: 699, qtd_ações: 68.16058047438966, dinheiro: R$ 555.9379719010105, Total Value: R$ 2237.440516529906, Lucro: R$ 448.72973161779646\n700 79.6997064911505 277.96898595050527 c 24.089258193969727 2020-11-30 00:00:00\n700 79.6997064911505 277.96898595050527 c 24.089258193969727 2020-11-30 00:00:00\n24.089258193969727\n1788.7107849121094\ndias negociados: 700, qtd_ações: 79.6997064911505, dinheiro: R$ 277.96898595050527, Total Value: R$ 2197.8757935994345, Lucro: R$ 409.1650086873251\n701 85.31150803705567 138.98449297525264 c 24.76646614074707 2020-12-01 00:00:00\n701 85.31150803705567 138.98449297525264 c 24.76646614074707 2020-12-01 00:00:00\n24.76646614074707\n1788.7107849121094\ndias negociados: 701, qtd_ações: 85.31150803705567, dinheiro: R$ 138.98449297525264, Total Value: R$ 2251.8490681910635, Lucro: R$ 463.1382832789541\n702 21.32787700926392 1742.821984964345 v 25.06637191772461 2020-12-02 00:00:00\n702 21.32787700926392 1742.821984964345 v 25.06637191772461 2020-12-02 00:00:00\n25.06637191772461\n1788.7107849121094\ndias negociados: 702, qtd_ações: 21.32787700926392, dinheiro: R$ 1742.821984964345, Total Value: R$ 2277.434482294042, Lucro: R$ 488.72369738193265\n25.772602081298828\n1788.7107849121094\ndias negociados: 703, qtd_ações: 21.32787700926392, dinheiro: R$ 1742.821984964345, Total Value: R$ 2292.4968723629854, Lucro: R$ 503.78608745087604\n704 5.33196925231598 2168.8509941856437 v 26.633625030517578 2020-12-04 00:00:00\n704 5.33196925231598 2168.8509941856437 v 26.633625030517578 2020-12-04 00:00:00\n26.633625030517578\n1788.7107849121094\ndias negociados: 704, 
qtd_ações: 5.33196925231598, dinheiro: R$ 2168.8509941856437, Total Value: R$ 2310.8606639260765, Lucro: R$ 522.1498790139672\n705 46.84762311380358 1084.4254970928218 c 26.120882034301758 2020-12-07 00:00:00\n705 46.84762311380358 1084.4254970928218 c 26.120882034301758 2020-12-07 00:00:00\n26.120882034301758\n1788.7107849121094\ndias negociados: 705, qtd_ações: 46.84762311380358, dinheiro: R$ 1084.4254970928218, Total Value: R$ 2308.1267340359136, Lucro: R$ 519.4159491238042\n706 67.87017874459315 542.2127485464109 c 25.79195213317871 2020-12-08 00:00:00\n706 67.87017874459315 542.2127485464109 c 25.79195213317871 2020-12-08 00:00:00\n25.79195213317871\n1788.7107849121094\ndias negociados: 706, qtd_ações: 67.87017874459315, dinheiro: R$ 542.2127485464109, Total Value: R$ 2292.7171499972405, Lucro: R$ 504.00636508513116\n707 78.27220780478024 271.10637427320546 c 26.062835693359375 2020-12-09 00:00:00\n707 78.27220780478024 271.10637427320546 c 26.062835693359375 2020-12-09 00:00:00\n26.062835693359375\n1788.7107849121094\ndias negociados: 707, qtd_ações: 78.27220780478024, dinheiro: R$ 271.10637427320546, Total Value: R$ 2311.102065645674, Lucro: R$ 522.3912807335646\n708 19.56805195119506 1851.0807479916966 v 26.914182662963867 2020-12-10 00:00:00\n708 19.56805195119506 1851.0807479916966 v 26.914182662963867 2020-12-10 00:00:00\n26.914182662963867\n1788.7107849121094\ndias negociados: 708, qtd_ações: 19.56805195119506, dinheiro: R$ 1851.0807479916966, Total Value: R$ 2377.738872564527, Lucro: R$ 589.0280876524175\n709 54.26845529144145 925.5403739958483 c 26.67232322692871 2020-12-11 00:00:00\n709 54.26845529144145 925.5403739958483 c 26.67232322692871 2020-12-11 00:00:00\n26.67232322692871\n1788.7107849121094\ndias negociados: 709, qtd_ações: 54.26845529144145, dinheiro: R$ 925.5403739958483, Total Value: R$ 2373.0061545553044, Lucro: R$ 584.295369643195\n26.72069549560547\n1788.7107849121094\ndias negociados: 710, qtd_ações: 54.26845529144145, dinheiro: R$ 925.5403739958483, Total Value: R$ 2375.6312428553347, Lucro: R$ 586.9204579432253\n711 13.567113822860362 2022.1650336965783 v 26.943206787109375 2020-12-15 00:00:00\n711 13.567113822860362 2022.1650336965783 v 26.943206787109375 2020-12-15 00:00:00\n26.943206787109375\n1788.7107849121094\ndias negociados: 711, qtd_ações: 13.567113822860362, dinheiro: R$ 2022.1650336965783, Total Value: R$ 2387.706586930155, Lucro: R$ 598.9958020180457\n712 3.3917784557150905 2299.66817067797 v 27.272136688232422 2020-12-16 00:00:00\n712 3.3917784557150905 2299.66817067797 v 27.272136688232422 2020-12-16 00:00:00\n27.272136688232422\n1788.7107849121094\ndias negociados: 712, qtd_ações: 3.3917784557150905, dinheiro: R$ 2299.66817067797, Total Value: R$ 2392.1692163384337, Lucro: R$ 603.4584314263243\n713 45.47862857358214 1149.834085338985 c 27.320507049560547 2020-12-17 00:00:00\n713 45.47862857358214 1149.834085338985 c 27.320507049560547 2020-12-17 00:00:00\n27.320507049560547\n1788.7107849121094\ndias negociados: 713, qtd_ações: 45.47862857358214, dinheiro: R$ 1149.834085338985, Total Value: R$ 2392.3332778878816, Lucro: R$ 603.6224929757723\n714 66.62689571225204 574.9170426694925 c 27.18506622314453 2020-12-18 00:00:00\n714 66.62689571225204 574.9170426694925 c 27.18506622314453 2020-12-18 00:00:00\n27.18506622314453\n1788.7107849121094\ndias negociados: 714, qtd_ações: 66.62689571225204, dinheiro: R$ 574.9170426694925, Total Value: R$ 2386.1736148496084, Lucro: R$ 597.462829937499\n715 77.62368186785044 287.45852133474625 c 
26.140230178833008 2020-12-21 00:00:00\n715 77.62368186785044 287.45852133474625 c 26.140230178833008 2020-12-21 00:00:00\n26.140230178833008\n1788.7107849121094\ndias negociados: 715, qtd_ações: 77.62368186785044, dinheiro: R$ 287.45852133474625, Total Value: R$ 2316.5594326888627, Lucro: R$ 527.8486477767533\n716 19.40592046696261 1823.9280336624843 v 26.391765594482422 2020-12-22 00:00:00\n716 19.40592046696261 1823.9280336624843 v 26.391765594482422 2020-12-22 00:00:00\n26.391765594482422\n1788.7107849121094\ndias negociados: 716, qtd_ações: 19.40592046696261, dinheiro: R$ 1823.9280336624843, Total Value: R$ 2336.0845377717305, Lucro: R$ 547.3737528596212\n717 53.13246109154552 911.9640168312421 c 27.03995132446289 2020-12-23 00:00:00\n717 53.13246109154552 911.9640168312421 c 27.03995132446289 2020-12-23 00:00:00\n27.03995132446289\n1788.7107849121094\ndias negociados: 717, qtd_ações: 53.13246109154552, dinheiro: R$ 911.9640168312421, Total Value: R$ 2348.6631784955516, Lucro: R$ 559.9523935834422\n718 13.28311527288638 1998.3552414765745 v 27.262460708618164 2020-12-28 00:00:00\n718 13.28311527288638 1998.3552414765745 v 27.262460708618164 2020-12-28 00:00:00\n27.262460708618164\n1788.7107849121094\ndias negociados: 718, qtd_ações: 13.28311527288638, dinheiro: R$ 1998.3552414765745, Total Value: R$ 2360.4856496916855, Lucro: R$ 571.7748647795761\n719 3.320778818221595 2270.8204729063773 v 27.349531173706055 2020-12-29 00:00:00\n719 3.320778818221595 2270.8204729063773 v 27.349531173706055 2020-12-29 00:00:00\n27.349531173706055\n1788.7107849121094\ndias negociados: 719, qtd_ações: 3.320778818221595, dinheiro: R$ 2270.8204729063773, Total Value: R$ 2361.6422167163114, Lucro: R$ 572.931431804202\n720 0.8301947045553988 2339.1054441491347 v 27.417251586914062 2020-12-30 00:00:00\n720 0.8301947045553988 2339.1054441491347 v 27.417251586914062 2020-12-30 00:00:00\n27.417251586914062\n1788.7107849121094\ndias negociados: 720, qtd_ações: 0.8301947045553988, dinheiro: R$ 2339.1054441491347, Total Value: R$ 2361.867101230054, Lucro: R$ 573.1563163179444\n721 0.2075486761388497 2356.5200396283185 v 27.968692779541016 2021-01-04 00:00:00\n721 0.2075486761388497 2356.5200396283185 v 27.968692779541016 2021-01-04 00:00:00\n27.968692779541016\n1788.7107849121094\ndias negociados: 721, qtd_ações: 0.2075486761388497, dinheiro: R$ 2356.5200396283185, Total Value: R$ 2362.3249047880463, Lucro: R$ 573.6141198759369\n722 0.051887169034712424 2361.043858951693 v 29.061901092529297 2021-01-05 00:00:00\n722 0.051887169034712424 2361.043858951693 v 29.061901092529297 2021-01-05 00:00:00\n29.061901092529297\n1788.7107849121094\ndias negociados: 722, qtd_ações: 0.051887169034712424, dinheiro: R$ 2361.043858951693, Total Value: R$ 2362.551798726151, Lucro: R$ 573.8410138140416\n723 40.59186443021533 1180.5219294758465 c 29.119945526123047 2021-01-06 00:00:00\n723 40.59186443021533 1180.5219294758465 c 29.119945526123047 2021-01-06 00:00:00\n29.119945526123047\n1788.7107849121094\ndias negociados: 723, qtd_ações: 40.59186443021533, dinheiro: R$ 1180.5219294758465, Total Value: R$ 2362.554810487489, Lucro: R$ 573.8440255753794\n29.990642547607422\n1788.7107849121094\ndias negociados: 724, qtd_ações: 40.59186443021533, dinheiro: R$ 1180.5219294758465, Total Value: R$ 2397.8980259433747, Lucro: R$ 609.1872410312653\n725 60.197476272169915 590.2609647379232 c 30.106735229492188 2021-01-08 00:00:00\n725 60.197476272169915 590.2609647379232 c 30.106735229492188 2021-01-08 
00:00:00\n30.106735229492188\n1788.7107849121094\ndias negociados: 725, qtd_ações: 60.197476272169915, dinheiro: R$ 590.2609647379232, Total Value: R$ 2402.610444347781, Lucro: R$ 613.8996594356718\n29.855201721191406\n1788.7107849121094\ndias negociados: 726, qtd_ações: 60.197476272169915, dinheiro: R$ 590.2609647379232, Total Value: R$ 2387.468761950189, Lucro: R$ 598.7579770380798\n727 70.15710184399323 295.1304823689616 c 29.632688522338867 2021-01-12 00:00:00\n727 70.15710184399323 295.1304823689616 c 29.632688522338867 2021-01-12 00:00:00\n29.632688522338867\n1788.7107849121094\ndias negociados: 727, qtd_ações: 70.15710184399323, dinheiro: R$ 295.1304823689616, Total Value: R$ 2374.0740289420187, Lucro: R$ 585.3632440299093\n728 75.38974887131583 147.5652411844808 c 28.200878143310547 2021-01-13 00:00:00\n728 75.38974887131583 147.5652411844808 c 28.200878143310547 2021-01-13 00:00:00\n28.200878143310547\n1788.7107849121094\ndias negociados: 728, qtd_ações: 75.38974887131583, dinheiro: R$ 147.5652411844808, Total Value: R$ 2273.6223623592423, Lucro: R$ 484.9115774471329\n729 77.97942056139081 73.7826205922404 c 28.49110984802246 2021-01-14 00:00:00\n729 77.97942056139081 73.7826205922404 c 28.49110984802246 2021-01-14 00:00:00\n28.49110984802246\n1788.7107849121094\ndias negociados: 729, qtd_ações: 77.97942056139081, dinheiro: R$ 73.7826205922404, Total Value: R$ 2295.5028576919676, Lucro: R$ 506.79207277985824\n730 79.33549855707966 36.8913102961202 c 27.204416275024414 2021-01-15 00:00:00\n730 79.33549855707966 36.8913102961202 c 27.204416275024414 2021-01-15 00:00:00\n27.204416275024414\n1788.7107849121094\ndias negociados: 730, qtd_ações: 79.33549855707966, dinheiro: R$ 36.8913102961202, Total Value: R$ 2195.167238429514, Lucro: R$ 406.4564535174045\n731 19.833874639269915 1652.7199143669689 v 27.156042098999023 2021-01-18 00:00:00\n731 19.833874639269915 1652.7199143669689 v 27.156042098999023 2021-01-18 00:00:00\n27.156042098999023\n1788.7107849121094\ndias negociados: 731, qtd_ações: 19.833874639269915, dinheiro: R$ 1652.7199143669689, Total Value: R$ 2191.3294490572516, Lucro: R$ 402.61866414514225\n27.755855560302734\n1788.7107849121094\ndias negociados: 732, qtd_ações: 19.833874639269915, dinheiro: R$ 1652.7199143669689, Total Value: R$ 2203.2260740556962, Lucro: R$ 414.51528914358687\n733 50.11291661884448 826.3599571834844 c 27.29148292541504 2021-01-20 00:00:00\n733 50.11291661884448 826.3599571834844 c 27.29148292541504 2021-01-20 00:00:00\n27.29148292541504\n1788.7107849121094\ndias negociados: 733, qtd_ações: 50.11291661884448, dinheiro: R$ 826.3599571834844, Total Value: R$ 2194.015765429426, Lucro: R$ 405.30498051731684\n734 12.52822915471112 1828.1036239576247 v 26.652973175048828 2021-01-21 00:00:00\n734 12.52822915471112 1828.1036239576247 v 26.652973175048828 2021-01-21 00:00:00\n26.652973175048828\n1788.7107849121094\ndias negociados: 734, qtd_ações: 12.52822915471112, dinheiro: R$ 1828.1036239576247, Total Value: R$ 2162.018179549005, Lucro: R$ 373.3073946368954\n735 47.40511669486137 914.0518119788123 c 26.20795249938965 2021-01-22 00:00:00\n735 47.40511669486137 914.0518119788123 c 26.20795249938965 2021-01-22 00:00:00\n26.20795249938965\n1788.7107849121094\ndias negociados: 735, qtd_ações: 47.40511669486137, dinheiro: R$ 914.0518119788123, Total Value: R$ 2156.4428585457626, Lucro: R$ 367.7320736336533\n736 64.90168918384711 457.02590598940617 c 26.120882034301758 2021-01-26 00:00:00\n736 64.90168918384711 457.02590598940617 c 26.120882034301758 2021-01-26 
00:00:00\n26.120882034301758\n1788.7107849121094\ndias negociados: 736, qtd_ações: 64.90168918384711, dinheiro: R$ 457.02590598940617, Total Value: R$ 2152.315272987595, Lucro: R$ 363.60448807548573\n737 73.52226315147057 228.51295299470308 c 26.507858276367188 2021-01-27 00:00:00\n737 73.52226315147057 228.51295299470308 c 26.507858276367188 2021-01-27 00:00:00\n26.507858276367188\n1788.7107849121094\ndias negociados: 737, qtd_ações: 73.52226315147057, dinheiro: R$ 228.51295299470308, Total Value: R$ 2177.4306847716584, Lucro: R$ 388.71989985954906\n738 18.380565787867642 1709.405894419276 v 26.856136322021484 2021-01-28 00:00:00\n738 18.380565787867642 1709.405894419276 v 26.856136322021484 2021-01-28 00:00:00\n26.856136322021484\n1788.7107849121094\ndias negociados: 738, qtd_ações: 18.380565787867642, dinheiro: R$ 1709.405894419276, Total Value: R$ 2203.0368748941337, Lucro: R$ 414.3260899820243\n739 51.48167469606386 854.702947209638 c 25.82097625732422 2021-01-29 00:00:00\n739 51.48167469606386 854.702947209638 c 25.82097625732422 2021-01-29 00:00:00\n25.82097625732422\n1788.7107849121094\ndias negociados: 739, qtd_ações: 51.48167469606386, dinheiro: R$ 854.702947209638, Total Value: R$ 2184.010047223992, Lucro: R$ 395.29926231188256\n740 12.870418674015966 1883.0606620581752 v 26.633625030517578 2021-02-01 00:00:00\n740 12.870418674015966 1883.0606620581752 v 26.633625030517578 2021-02-01 00:00:00\n26.633625030517578\n1788.7107849121094\ndias negociados: 740, qtd_ações: 12.870418674015966, dinheiro: R$ 1883.0606620581752, Total Value: R$ 2225.8465670076876, Lucro: R$ 437.1357820955782\n741 46.827790730647116 941.5303310290876 c 27.726831436157227 2021-02-02 00:00:00\n741 46.827790730647116 941.5303310290876 c 27.726831436157227 2021-02-02 00:00:00\n27.726831436157227\n1788.7107849121094\ndias negociados: 741, qtd_ações: 46.827790730647116, dinheiro: R$ 941.5303310290876, Total Value: R$ 2239.916591145386, Lucro: R$ 451.2058062332767\n742 11.706947682661779 1924.8337521053882 v 27.997716903686523 2021-02-03 00:00:00\n742 11.706947682661779 1924.8337521053882 v 27.997716903686523 2021-02-03 00:00:00\n27.997716903686523\n1788.7107849121094\ndias negociados: 742, qtd_ações: 11.706947682661779, dinheiro: R$ 1924.8337521053882, Total Value: R$ 2252.6015591308214, Lucro: R$ 463.89077421871207\n743 2.9267369206654448 2169.6402724126665 v 27.881622314453125 2021-02-04 00:00:00\n743 2.9267369206654448 2169.6402724126665 v 27.881622314453125 2021-02-04 00:00:00\n27.881622314453125\n1788.7107849121094\ndias negociados: 743, qtd_ações: 2.9267369206654448, dinheiro: R$ 2169.6402724126665, Total Value: R$ 2251.242445848426, Lucro: R$ 462.5316609363167\n744 41.807824001176755 1084.8201362063332 c 27.900972366333008 2021-02-05 00:00:00\n744 41.807824001176755 1084.8201362063332 c 27.900972366333008 2021-02-05 00:00:00\n27.900972366333008\n1788.7107849121094\ndias negociados: 744, qtd_ações: 41.807824001176755, dinheiro: R$ 1084.8201362063332, Total Value: R$ 2251.2990783596797, Lucro: R$ 462.5882934475703\n745 61.75322749505432 542.4100681031666 c 27.194740295410156 2021-02-08 00:00:00\n745 61.75322749505432 542.4100681031666 c 27.194740295410156 2021-02-08 00:00:00\n27.194740295410156\n1788.7107849121094\ndias negociados: 745, qtd_ações: 61.75322749505432, dinheiro: R$ 542.4100681031666, Total Value: R$ 2221.773052234551, Lucro: R$ 433.0622673224416\n746 71.93233532964472 271.2050340515833 c 26.643301010131836 2021-02-09 00:00:00\n746 71.93233532964472 271.2050340515833 c 26.643301010131836 
2021-02-09 00:00:00\n26.643301010131836\n1788.7107849121094\ndias negociados: 746, qtd_ações: 71.93233532964472, dinheiro: R$ 271.2050340515833, Total Value: R$ 2187.7198966010483, Lucro: R$ 399.0091116889389\n747 17.98308383241118 1722.1611225629513 v 26.894832611083984 2021-02-10 00:00:00\n747 17.98308383241118 1722.1611225629513 v 26.894832611083984 2021-02-10 00:00:00\n26.894832611083984\n1788.7107849121094\ndias negociados: 747, qtd_ações: 17.98308383241118, dinheiro: R$ 1722.1611225629513, Total Value: R$ 2205.8131520667407, Lucro: R$ 417.10236715463134\n748 4.495770958102795 2088.5536360194565 v 27.16571617126465 2021-02-11 00:00:00\n748 4.495770958102795 2088.5536360194565 v 27.16571617126465 2021-02-11 00:00:00\n27.16571617126465\n1788.7107849121094\ndias negociados: 748, qtd_ações: 4.495770958102795, dinheiro: R$ 2088.5536360194565, Total Value: R$ 2210.6844738382915, Lucro: R$ 421.97368892618215\n749 1.1239427395256987 2181.3261045570825 v 27.513996124267578 2021-02-12 00:00:00\n749 1.1239427395256987 2181.3261045570825 v 27.513996124267578 2021-02-12 00:00:00\n27.513996124267578\n1788.7107849121094\ndias negociados: 749, qtd_ações: 1.1239427395256987, dinheiro: R$ 2181.3261045570825, Total Value: R$ 2212.2502607362912, Lucro: R$ 423.53947582418186\n750 0.28098568488142467 2205.196094880337 v 28.316970825195312 2021-02-18 00:00:00\n750 0.28098568488142467 2205.196094880337 v 28.316970825195312 2021-02-18 00:00:00\n28.316970825195312\n1788.7107849121094\ndias negociados: 750, qtd_ações: 0.28098568488142467, dinheiro: R$ 2205.196094880337, Total Value: R$ 2213.152758321422, Lucro: R$ 424.4419734093126\n751 41.982661112848554 1102.5980474401686 c 26.44013786315918 2021-02-19 00:00:00\n751 41.982661112848554 1102.5980474401686 c 26.44013786315918 2021-02-19 00:00:00\n26.44013786315918\n1788.7107849121094\ndias negociados: 751, qtd_ações: 41.982661112848554, dinheiro: R$ 1102.5980474401686, Total Value: R$ 2212.625395126176, Lucro: R$ 423.91461021406667\n752 68.2795424417784 551.2990237200843 c 20.964426040649414 2021-02-22 00:00:00\n752 68.2795424417784 551.2990237200843 c 20.964426040649414 2021-02-22 00:00:00\n20.964426040649414\n1788.7107849121094\ndias negociados: 752, qtd_ações: 68.2795424417784, dinheiro: R$ 551.2990237200843, Total Value: R$ 1982.7404413301304, Lucro: R$ 194.02965641802098\n753 80.12188294182877 275.64951186004214 c 23.276607513427734 2021-02-23 00:00:00\n753 80.12188294182877 275.64951186004214 c 23.276607513427734 2021-02-23 00:00:00\n23.276607513427734\n1788.7107849121094\ndias negociados: 753, qtd_ações: 80.12188294182877, dinheiro: R$ 275.64951186004214, Total Value: R$ 2140.6151343337915, Lucro: R$ 351.9043494216821\n754 20.030470735457193 1694.1395909907399 v 23.60553741455078 2021-02-24 00:00:00\n754 20.030470735457193 1694.1395909907399 v 23.60553741455078 2021-02-24 00:00:00\n23.60553741455078\n1788.7107849121094\ndias negociados: 754, qtd_ações: 20.030470735457193, dinheiro: R$ 1694.1395909907399, Total Value: R$ 2166.9696173676393, Lucro: R$ 378.25883245552996\n755 57.78719871239827 847.0697954953699 c 22.4349365234375 2021-02-25 00:00:00\n755 57.78719871239827 847.0697954953699 c 22.4349365234375 2021-02-25 00:00:00\n22.4349365234375\n1788.7107849121094\ndias negociados: 755, qtd_ações: 57.78719871239827, dinheiro: R$ 847.0697954953699, Total Value: R$ 2143.5219304752945, Lucro: R$ 354.81114556318516\n21.515867233276367\n1788.7107849121094\ndias negociados: 756, qtd_ações: 57.78719871239827, dinheiro: R$ 847.0697954953699, Total Value: R$ 
2090.41149077429, Lucro: R$ 301.7007058621807\n757 77.68671141628266 423.53489774768497 c 21.283681869506836 2021-03-01 00:00:00\n757 77.68671141628266 423.53489774768497 c 21.283681869506836 2021-03-01 00:00:00\n21.283681869506836\n1788.7107849121094\ndias negociados: 757, qtd_ações: 77.68671141628266, dinheiro: R$ 423.53489774768497, Total Value: R$ 2076.99414902003, Lucro: R$ 288.28336410792053\n758 87.64099228767091 211.76744887384248 c 21.27400779724121 2021-03-02 00:00:00\n758 87.64099228767091 211.76744887384248 c 21.27400779724121 2021-03-02 00:00:00\n21.27400779724121\n1788.7107849121094\ndias negociados: 758, qtd_ações: 87.64099228767091, dinheiro: R$ 211.76744887384248, Total Value: R$ 2076.2426021597103, Lucro: R$ 287.5318172476009\n759 92.80603807997205 105.88372443692124 c 20.50005531311035 2021-03-03 00:00:00\n759 92.80603807997205 105.88372443692124 c 20.50005531311035 2021-03-03 00:00:00\n20.50005531311035\n1788.7107849121094\ndias negociados: 759, qtd_ações: 92.80603807997205, dinheiro: R$ 105.88372443692124, Total Value: R$ 2008.412638466974, Lucro: R$ 219.70185355486456\n760 23.201509519993014 1602.138668529513 v 21.496517181396484 2021-03-04 00:00:00\n760 23.201509519993014 1602.138668529513 v 21.496517181396484 2021-03-04 00:00:00\n21.496517181396484\n1788.7107849121094\ndias negociados: 760, qtd_ações: 23.201509519993014, dinheiro: R$ 1602.138668529513, Total Value: R$ 2100.890316560377, Lucro: R$ 312.1795316482676\n761 5.800377379998253 1979.0642808897799 v 21.660982131958008 2021-03-05 00:00:00\n761 5.800377379998253 1979.0642808897799 v 21.660982131958008 2021-03-05 00:00:00\n21.660982131958008\n1788.7107849121094\ndias negociados: 761, qtd_ações: 5.800377379998253, dinheiro: R$ 1979.0642808897799, Total Value: R$ 2104.7061516765352, Lucro: R$ 315.99536676442585\n20.412986755371094\n1788.7107849121094\ndias negociados: 762, qtd_ações: 5.800377379998253, dinheiro: R$ 1979.0642808897799, Total Value: R$ 2097.4673075238384, Lucro: R$ 308.756522611729\n763 1.4500943449995634 2069.9287799157432 v 20.88703155517578 2021-03-09 00:00:00\n763 1.4500943449995634 2069.9287799157432 v 20.88703155517578 2021-03-09 00:00:00\n20.88703155517578\n1788.7107849121094\ndias negociados: 763, qtd_ações: 1.4500943449995634, dinheiro: R$ 2069.9287799157432, Total Value: R$ 2100.2169462577313, Lucro: R$ 311.5061613456219\n764 0.36252358624989084 2093.4340244976893 v 21.612611770629883 2021-03-10 00:00:00\n764 0.36252358624989084 2093.4340244976893 v 21.612611770629883 2021-03-10 00:00:00\n21.612611770629883\n1788.7107849121094\ndias negociados: 764, qtd_ações: 0.36252358624989084, dinheiro: R$ 2093.4340244976893, Total Value: R$ 2101.2691060250045, Lucro: R$ 312.5583211128951\n765 0.09063089656247271 2099.5602233458926 v 22.531679153442383 2021-03-11 00:00:00\n765 0.09063089656247271 2099.5602233458926 v 22.531679153442383 2021-03-11 00:00:00\n22.531679153442383\n1788.7107849121094\ndias negociados: 765, qtd_ações: 0.09063089656247271, dinheiro: R$ 2099.5602233458926, Total Value: R$ 2101.602289628627, Lucro: R$ 312.89150471651783\n766 0.022657724140618177 2101.083881870061 v 22.415586471557617 2021-03-12 00:00:00\n766 0.022657724140618177 2101.083881870061 v 22.415586471557617 2021-03-12 00:00:00\n22.415586471557617\n1788.7107849121094\ndias negociados: 766, qtd_ações: 0.022657724140618177, dinheiro: R$ 2101.083881870061, Total Value: R$ 2101.5917680447837, Lucro: R$ 312.8809831326744\n767 45.938038237899036 1050.5419409350304 c 22.87995719909668 2021-03-15 00:00:00\n767 
45.938038237899036 1050.5419409350304 c 22.87995719909668 2021-03-15 00:00:00\n22.87995719909668\n1788.7107849121094\ndias negociados: 767, qtd_ações: 45.938038237899036, dinheiro: R$ 1050.5419409350304, Total Value: R$ 2101.6022896286267, Lucro: R$ 312.8915047165174\n768 69.26060514853887 525.2709704675152 c 22.522005081176758 2021-03-16 00:00:00\n768 69.26060514853887 525.2709704675152 c 22.522005081176758 2021-03-16 00:00:00\n22.522005081176758\n1788.7107849121094\ndias negociados: 768, qtd_ações: 69.26060514853887, dinheiro: R$ 525.2709704675152, Total Value: R$ 2085.158671548285, Lucro: R$ 296.44788663617555\n769 17.315151287134718 1735.390059333431 v 23.295957565307617 2021-03-17 00:00:00\n769 17.315151287134718 1735.390059333431 v 23.295957565307617 2021-03-17 00:00:00\n23.295957565307617\n1788.7107849121094\ndias negociados: 769, qtd_ações: 17.315151287134718, dinheiro: R$ 1735.390059333431, Total Value: R$ 2138.763088955403, Lucro: R$ 350.0523040432936\n770 55.90800749436167 867.6950296667155 c 22.483306884765625 2021-03-18 00:00:00\n770 55.90800749436167 867.6950296667155 c 22.483306884765625 2021-03-18 00:00:00\n22.483306884765625\n1788.7107849121094\ndias negociados: 770, qtd_ações: 55.90800749436167, dinheiro: R$ 867.6950296667155, Total Value: R$ 2124.6919194782254, Lucro: R$ 335.981134566116\n771 13.977001873590417 1841.27264869642 v 23.21856117248535 2021-03-19 00:00:00\n771 13.977001873590417 1841.27264869642 v 23.21856117248535 2021-03-19 00:00:00\n23.21856117248535\n1788.7107849121094\ndias negociados: 771, qtd_ações: 13.977001873590417, dinheiro: R$ 1841.27264869642, Total Value: R$ 2165.7985217063215, Lucro: R$ 377.0877367942121\n772 3.4942504683976043 2079.799170557206 v 22.75419044494629 2021-03-22 00:00:00\n772 3.4942504683976043 2079.799170557206 v 22.75419044494629 2021-03-22 00:00:00\n22.75419044494629\n1788.7107849121094\ndias negociados: 772, qtd_ações: 3.4942504683976043, dinheiro: R$ 2079.799170557206, Total Value: R$ 2159.3080111774675, Lucro: R$ 370.59722626535813\n773 50.63891061317002 1039.899585278603 c 22.057632446289062 2021-03-23 00:00:00\n773 50.63891061317002 1039.899585278603 c 22.057632446289062 2021-03-23 00:00:00\n22.057632446289062\n1788.7107849121094\ndias negociados: 773, qtd_ações: 50.63891061317002, dinheiro: R$ 1039.899585278603, Total Value: R$ 2156.8740630643933, Lucro: R$ 368.16327815228397\n774 74.19057999019769 519.9497926393014 c 22.076982498168945 2021-03-24 00:00:00\n774 74.19057999019769 519.9497926393014 c 22.076982498168945 2021-03-24 00:00:00\n22.076982498168945\n1788.7107849121094\ndias negociados: 774, qtd_ações: 74.19057999019769, dinheiro: R$ 519.9497926393014, Total Value: R$ 2157.853928611899, Lucro: R$ 369.14314369978956\n775 85.77353417918332 259.9748963196507 c 22.444610595703125 2021-03-25 00:00:00\n775 85.77353417918332 259.9748963196507 c 22.444610595703125 2021-03-25 00:00:00\n22.444610595703125\n1788.7107849121094\ndias negociados: 775, qtd_ações: 85.77353417918332, dinheiro: R$ 259.9748963196507, Total Value: R$ 2185.1284703886527, Lucro: R$ 396.4176854765433\n776 21.44338354479583 1720.0211426499625 v 22.696142196655273 2021-03-26 00:00:00\n776 21.44338354479583 1720.0211426499625 v 22.696142196655273 2021-03-26 00:00:00\n22.696142196655273\n1788.7107849121094\ndias negociados: 776, qtd_ações: 21.44338354479583, dinheiro: R$ 1720.0211426499625, Total Value: R$ 2206.7032247600664, Lucro: R$ 417.99243984795703\n777 5.360845886198957 2090.789513323968 v 23.054096221923828 2021-03-29 00:00:00\n777 5.360845886198957 
2090.789513323968 v 23.054096221923828 2021-03-29 00:00:00\n23.054096221923828\n1788.7107849121094\ndias negociados: 777, qtd_ações: 5.360845886198957, dinheiro: R$ 2090.789513323968, Total Value: R$ 2214.3789702153035, Lucro: R$ 425.66818530319415\n778 1.3402114715497393 2183.4816059924697 v 23.054096221923828 2021-03-30 00:00:00\n778 1.3402114715497393 2183.4816059924697 v 23.054096221923828 2021-03-30 00:00:00\n23.054096221923828\n1788.7107849121094\ndias negociados: 778, qtd_ações: 1.3402114715497393, dinheiro: R$ 2183.4816059924697, Total Value: R$ 2214.3789702153035, Lucro: R$ 425.66818530319415\n779 0.3350528678874348 2206.917186123731 v 23.315305709838867 2021-03-31 00:00:00\n779 0.3350528678874348 2206.917186123731 v 23.315305709838867 2021-03-31 00:00:00\n23.315305709838867\n1788.7107849121094\ndias negociados: 779, qtd_ações: 0.3350528678874348, dinheiro: R$ 2206.917186123731, Total Value: R$ 2214.7290461674847, Lucro: R$ 426.01826125537536\n780 48.07872656929359 1103.4585930618655 c 23.11214256286621 2021-04-01 00:00:00\n780 48.07872656929359 1103.4585930618655 c 23.11214256286621 2021-04-01 00:00:00\n23.11214256286621\n1788.7107849121094\ndias negociados: 780, qtd_ações: 48.07872656929359, dinheiro: R$ 1103.4585930618655, Total Value: R$ 2214.6609757724427, Lucro: R$ 425.95019086033335\n781 71.80161187489404 551.7292965309327 c 23.257259368896484 2021-04-05 00:00:00\n781 71.80161187489404 551.7292965309327 c 23.257259368896484 2021-04-05 00:00:00\n23.257259368896484\n1788.7107849121094\ndias negociados: 781, qtd_ações: 71.80161187489404, dinheiro: R$ 551.7292965309327, Total Value: R$ 2221.6380070101814, Lucro: R$ 432.927222098072\n782 83.6729304973793 275.86464826546637 c 23.237911224365234 2021-04-06 00:00:00\n782 83.6729304973793 275.86464826546637 c 23.237911224365234 2021-04-06 00:00:00\n23.237911224365234\n1788.7107849121094\ndias negociados: 782, qtd_ações: 83.6729304973793, dinheiro: R$ 275.86464826546637, Total Value: R$ 2220.248779046049, Lucro: R$ 431.5379941339397\n23.21856117248535\n1788.7107849121094\ndias negociados: 783, qtd_ações: 83.6729304973793, dinheiro: R$ 275.86464826546637, Total Value: R$ 2218.629703499983, Lucro: R$ 429.9189185878736\n22.928329467773438\n1788.7107849121094\ndias negociados: 784, qtd_ações: 83.6729304973793, dinheiro: R$ 275.86464826546637, Total Value: R$ 2194.345166243487, Lucro: R$ 405.6343813313774\n22.87995719909668\n1788.7107849121094\ndias negociados: 785, qtd_ações: 83.6729304973793, dinheiro: R$ 275.86464826546637, Total Value: R$ 2190.297716768496, Lucro: R$ 401.5869318563864\n786 20.918232624344824 1726.260171996636 v 23.11214256286621 2021-04-12 00:00:00\n786 20.918232624344824 1726.260171996636 v 23.11214256286621 2021-04-12 00:00:00\n23.11214256286621\n1788.7107849121094\ndias negociados: 786, qtd_ações: 20.918232624344824, dinheiro: R$ 1726.260171996636, Total Value: R$ 2209.7253465736926, Lucro: R$ 421.01456166158323\n787 58.13890176487474 863.130085998318 c 23.189537048339844 2021-04-13 00:00:00\n787 58.13890176487474 863.130085998318 c 23.189537048339844 2021-04-13 00:00:00\n23.189537048339844\n1788.7107849121094\ndias negociados: 787, qtd_ações: 58.13890176487474, dinheiro: R$ 863.130085998318, Total Value: R$ 2211.3443024246717, Lucro: R$ 422.6335175125623\n788 14.534725441218685 1890.3208687044937 v 23.557165145874023 2021-04-14 00:00:00\n788 14.534725441218685 1890.3208687044937 v 23.557165145874023 2021-04-14 00:00:00\n23.557165145874023\n1788.7107849121094\ndias negociados: 788, qtd_ações: 14.534725441218685, 
dinheiro: R$ 1890.3208687044937, Total Value: R$ 2232.717796273219, Lucro: R$ 444.00701136110956\n789 55.46848153070553 945.1604343522469 c 23.09000015258789 2021-04-15 00:00:00\n789 55.46848153070553 945.1604343522469 c 23.09000015258789 2021-04-15 00:00:00\n23.09000015258789\n1788.7107849121094\ndias negociados: 789, qtd_ações: 55.46848153070553, dinheiro: R$ 945.1604343522469, Total Value: R$ 2225.9276813600563, Lucro: R$ 437.21689644794697\n22.950000762939453\n1788.7107849121094\ndias negociados: 790, qtd_ações: 55.46848153070553, dinheiro: R$ 945.1604343522469, Total Value: R$ 2218.1621278010316, Lucro: R$ 429.4513428889222\n791 13.867120382676383 1955.2415115917825 v 24.280000686645508 2021-04-19 00:00:00\n791 13.867120382676383 1955.2415115917825 v 24.280000686645508 2021-04-19 00:00:00\n24.280000686645508\n1788.7107849121094\ndias negociados: 791, qtd_ações: 13.867120382676383, dinheiro: R$ 1955.2415115917825, Total Value: R$ 2291.9352040049607, Lucro: R$ 503.22441909285135\n792 3.4667800956690957 2202.977614054364 v 23.81999969482422 2021-04-20 00:00:00\n792 3.4667800956690957 2202.977614054364 v 23.81999969482422 2021-04-20 00:00:00\n23.81999969482422\n1788.7107849121094\ndias negociados: 792, qtd_ações: 3.4667800956690957, dinheiro: R$ 2202.977614054364, Total Value: R$ 2285.5563148752244, Lucro: R$ 496.84552996311504\n793 49.9235008700082 1101.488807027182 c 23.709999084472656 2021-04-22 00:00:00\n793 49.9235008700082 1101.488807027182 c 23.709999084472656 2021-04-22 00:00:00\n23.709999084472656\n1788.7107849121094\ndias negociados: 793, qtd_ações: 49.9235008700082, dinheiro: R$ 1101.488807027182, Total Value: R$ 2285.1749669487463, Lucro: R$ 496.4641820366369\n794 73.1714701016602 550.744403513591 c 23.690000534057617 2021-04-23 00:00:00\n794 73.1714701016602 550.744403513591 c 23.690000534057617 2021-04-23 00:00:00\n23.690000534057617\n1788.7107849121094\ndias negociados: 794, qtd_ações: 73.1714701016602, dinheiro: R$ 550.744403513591, Total Value: R$ 2284.1765692997024, Lucro: R$ 495.465784387593\n795 18.29286752541505 1855.7576104588466 v 23.780000686645508 2021-04-26 00:00:00\n795 18.29286752541505 1855.7576104588466 v 23.780000686645508 2021-04-26 00:00:00\n23.780000686645508\n1788.7107849121094\ndias negociados: 795, qtd_ações: 18.29286752541505, dinheiro: R$ 1855.7576104588466, Total Value: R$ 2290.762012773932, Lucro: R$ 502.05122786182255\n796 58.460780508383905 927.8788052294233 c 23.100000381469727 2021-04-27 00:00:00\n796 58.460780508383905 927.8788052294233 c 23.100000381469727 2021-04-27 00:00:00\n23.100000381469727\n1788.7107849121094\ndias negociados: 796, qtd_ações: 58.460780508383905, dinheiro: R$ 927.8788052294233, Total Value: R$ 2278.3228572741095, Lucro: R$ 489.61207236200016\n797 14.615195127095976 1977.5421426735252 v 23.940000534057617 2021-04-28 00:00:00\n797 14.615195127095976 1977.5421426735252 v 23.940000534057617 2021-04-28 00:00:00\n23.940000534057617\n1788.7107849121094\ndias negociados: 797, qtd_ações: 14.615195127095976, dinheiro: R$ 1977.5421426735252, Total Value: R$ 2327.429921821559, Lucro: R$ 538.7191369094498\n798 56.47679699860775 988.7710713367626 c 23.6200008392334 2021-04-29 00:00:00\n798 56.47679699860775 988.7710713367626 c 23.6200008392334 2021-04-29 00:00:00\n23.6200008392334\n1788.7107849121094\ndias negociados: 798, qtd_ações: 56.47679699860775, dinheiro: R$ 988.7710713367626, Total Value: R$ 2322.7530638410917, Lucro: R$ 534.0422789289823\n23.6200008392334\n1788.7107849121094\ndias negociados: 799, qtd_ações: 
56.47679699860775, dinheiro: R$ 988.7710713367626, Total Value: R$ 2322.7530638410917, Lucro: R$ 534.0422789289823\n800 14.119199249651938 1981.2095995213103 v 23.43000030517578 2021-05-03 00:00:00\n800 14.119199249651938 1981.2095995213103 v 23.43000030517578 2021-05-03 00:00:00\n23.43000030517578\n1788.7107849121094\ndias negociados: 800, qtd_ações: 14.119199249651938, dinheiro: R$ 1981.2095995213103, Total Value: R$ 2312.022442249493, Lucro: R$ 523.3116573373836\n22.889999389648438\n1788.7107849121094\ndias negociados: 801, qtd_ações: 14.119199249651938, dinheiro: R$ 1981.2095995213103, Total Value: R$ 2304.3980617281677, Lucro: R$ 515.6872768160583\n802 3.5297998124129846 2233.5549873028076 v 23.829999923706055 2021-05-05 00:00:00\n802 3.5297998124129846 2233.5549873028076 v 23.829999923706055 2021-05-05 00:00:00\n23.829999923706055\n1788.7107849121094\ndias negociados: 802, qtd_ações: 3.5297998124129846, dinheiro: R$ 2233.5549873028076, Total Value: R$ 2317.6701165633067, Lucro: R$ 528.9593316511973\n803 51.05224635077059 1116.7774936514038 c 23.5 2021-05-06 00:00:00\n803 51.05224635077059 1116.7774936514038 c 23.5 2021-05-06 00:00:00\n23.5\n1788.7107849121094\ndias negociados: 803, qtd_ações: 51.05224635077059, dinheiro: R$ 1116.7774936514038, Total Value: R$ 2316.5052828945127, Lucro: R$ 527.7944979824033\n804 12.763061587692647 2050.2677860416816 v 24.3799991607666 2021-05-07 00:00:00\n804 12.763061587692647 2050.2677860416816 v 24.3799991607666 2021-05-07 00:00:00\n24.3799991607666\n1788.7107849121094\ndias negociados: 804, qtd_ações: 12.763061587692647, dinheiro: R$ 2050.2677860416816, Total Value: R$ 2361.431216838441, Lucro: R$ 572.7204319263315\n24.700000762939453\n1788.7107849121094\ndias negociados: 805, qtd_ações: 12.763061587692647, dinheiro: R$ 2050.2677860416816, Total Value: R$ 2365.5154169951334, Lucro: R$ 576.804632083024\n806 3.1907653969231617 2291.011031587993 v 25.149999618530273 2021-05-11 00:00:00\n806 3.1907653969231617 2291.011031587993 v 25.149999618530273 2021-05-11 00:00:00\n25.149999618530273\n1788.7107849121094\ndias negociados: 806, qtd_ações: 3.1907653969231617, dinheiro: R$ 2291.011031587993, Total Value: R$ 2371.25878010343, Lucro: R$ 582.5479951913208\n807 0.7976913492307904 2350.311408133003 v 24.780000686645508 2021-05-12 00:00:00\n807 0.7976913492307904 2350.311408133003 v 24.780000686645508 2021-05-12 00:00:00\n24.780000686645508\n1788.7107849121094\ndias negociados: 807, qtd_ações: 0.7976913492307904, dinheiro: R$ 2350.311408133003, Total Value: R$ 2370.0782003146733, Lucro: R$ 581.3674154025639\n808 0.1994228373076976 2365.2621381090285 v 24.989999771118164 2021-05-13 00:00:00\n808 0.1994228373076976 2365.2621381090285 v 24.989999771118164 2021-05-13 00:00:00\n24.989999771118164\n1788.7107849121094\ndias negociados: 808, qtd_ações: 0.1994228373076976, dinheiro: R$ 2365.2621381090285, Total Value: R$ 2370.2457147677037, Lucro: R$ 581.5349298555943\n26.299999237060547\n1788.7107849121094\ndias negociados: 809, qtd_ações: 0.1994228373076976, dinheiro: R$ 2365.2621381090285, Total Value: R$ 2370.5069585780734, Lucro: R$ 581.796173665964\n26.65999984741211\n1788.7107849121094\ndias negociados: 810, qtd_ações: 0.1994228373076976, dinheiro: R$ 2365.2621381090285, Total Value: R$ 2370.5787509212223, Lucro: R$ 581.8679660091129\n26.350000381469727\n1788.7107849121094\ndias negociados: 811, qtd_ações: 0.1994228373076976, dinheiro: R$ 2365.2621381090285, Total Value: R$ 2370.51692994816, Lucro: R$ 581.8061450360506\n812 45.42432097522142 
1182.6310690545142 c 26.149999618530273 2021-05-19 00:00:00\n812 45.42432097522142 1182.6310690545142 c 26.149999618530273 2021-05-19 00:00:00\n26.149999618530273\n1788.7107849121094\ndias negociados: 812, qtd_ações: 45.42432097522142, dinheiro: R$ 1182.6310690545142, Total Value: R$ 2370.477045228551, Lucro: R$ 581.7662603164417\n813 11.356080243805355 2066.020561616935 v 25.93000030517578 2021-05-20 00:00:00\n813 11.356080243805355 2066.020561616935 v 25.93000030517578 2021-05-20 00:00:00\n25.93000030517578\n1788.7107849121094\ndias negociados: 813, qtd_ações: 11.356080243805355, dinheiro: R$ 2066.020561616935, Total Value: R$ 2360.4837258044086, Lucro: R$ 571.7729408922992\n814 51.16379702367395 1033.0102808084675 c 25.950000762939453 2021-05-21 00:00:00\n814 51.16379702367395 1033.0102808084675 c 25.950000762939453 2021-05-21 00:00:00\n25.950000762939453\n1788.7107849121094\ndias negociados: 814, qtd_ações: 51.16379702367395, dinheiro: R$ 1033.0102808084675, Total Value: R$ 2360.710852607686, Lucro: R$ 572.0000676955765\n815 12.790949255918488 2045.6697099786065 v 26.389999389648438 2021-05-24 00:00:00\n815 12.790949255918488 2045.6697099786065 v 26.389999389648438 2021-05-24 00:00:00\n26.389999389648438\n1788.7107849121094\ndias negociados: 815, qtd_ações: 12.790949255918488, dinheiro: R$ 2045.6697099786065, Total Value: R$ 2383.2228530353195, Lucro: R$ 594.5120681232102\n816 52.3743412431227 1022.8348549893033 c 25.84000015258789 2021-05-25 00:00:00\n816 52.3743412431227 1022.8348549893033 c 25.84000015258789 2021-05-25 00:00:00\n25.84000015258789\n1788.7107849121094\ndias negociados: 816, qtd_ações: 52.3743412431227, dinheiro: R$ 1022.8348549893033, Total Value: R$ 2376.187840703284, Lucro: R$ 587.4770557911747\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d353e6ce711e6a1c07d45121b856856834ff1e | 25,758 | ipynb | Jupyter Notebook | skspec/tests/regression/REGRESSION.ipynb | hugadams/scikit-spectra | c451be6d54080fbcc2a3bc5daf8846b83b7343ee | [
"BSD-3-Clause"
] | 83 | 2015-01-15T18:57:22.000Z | 2022-01-18T11:43:55.000Z | skspec/tests/regression/REGRESSION.ipynb | hugadams/scikit-spectra | c451be6d54080fbcc2a3bc5daf8846b83b7343ee | [
"BSD-3-Clause"
] | 18 | 2015-02-02T22:46:51.000Z | 2019-04-29T17:23:32.000Z | skspec/tests/regression/REGRESSION.ipynb | hugadams/scikit-spectra | c451be6d54080fbcc2a3bc5daf8846b83b7343ee | [
"BSD-3-Clause"
] | 43 | 2015-01-02T20:47:11.000Z | 2021-12-18T16:14:40.000Z | 38.733835 | 1,487 | 0.54682 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0d35c2bdc4e0123faf758531bdfab46be835890 | 361,955 | ipynb | Jupyter Notebook | 06. Emotion Recognition using Facial Images/Code/Face_Emotion_Recognition_from_Images.ipynb | Jamess-ai/ai-with-python-series | 4346a836aefd652d9fdc7f0ad3bb856e5c129b22 | [
"MIT"
] | 23 | 2021-07-16T11:32:17.000Z | 2022-03-14T02:28:32.000Z | 06. Emotion Recognition using Facial Images/Code/Face_Emotion_Recognition_from_Images.ipynb | Jamess-ai/ai-with-python-series | 4346a836aefd652d9fdc7f0ad3bb856e5c129b22 | [
"MIT"
] | 1 | 2021-09-28T15:17:42.000Z | 2021-09-28T15:48:09.000Z | 06. Emotion Recognition using Facial Images/Code/Face_Emotion_Recognition_from_Images.ipynb | Jamess-ai/ai-with-python-series | 4346a836aefd652d9fdc7f0ad3bb856e5c129b22 | [
"MIT"
] | 28 | 2021-08-01T09:10:18.000Z | 2022-03-24T12:47:49.000Z | 970.38874 | 118,678 | 0.935138 | [
[
[
"<a href=\"https://colab.research.google.com/github/rjrahul24/ai-with-python-series/blob/main/06.%20Emotion%20Recognition%20using%20Facial%20Images/Code/Face_Emotion_Recognition_from_Images.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# We install the FER() library to perform facial recognition\n# This installation will also take care of any of the above dependencies if they are missing",
"_____no_output_____"
],
[
"pip install fer",
"Collecting fer\n Downloading fer-21.0.3-py3-none-any.whl (810 kB)\n\u001b[?25l\r\u001b[K |▍ | 10 kB 24.6 MB/s eta 0:00:01\r\u001b[K |▉ | 20 kB 25.9 MB/s eta 0:00:01\r\u001b[K |█▏ | 30 kB 26.2 MB/s eta 0:00:01\r\u001b[K |█▋ | 40 kB 27.3 MB/s eta 0:00:01\r\u001b[K |██ | 51 kB 28.3 MB/s eta 0:00:01\r\u001b[K |██▍ | 61 kB 28.5 MB/s eta 0:00:01\r\u001b[K |██▉ | 71 kB 21.5 MB/s eta 0:00:01\r\u001b[K |███▎ | 81 kB 22.4 MB/s eta 0:00:01\r\u001b[K |███▋ | 92 kB 22.9 MB/s eta 0:00:01\r\u001b[K |████ | 102 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████▌ | 112 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████▉ | 122 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████▎ | 133 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████▋ | 143 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████ | 153 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████▌ | 163 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████▉ | 174 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████▎ | 184 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████▊ | 194 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████ | 204 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████▌ | 215 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████ | 225 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████▎ | 235 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████▊ | 245 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████ | 256 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████▌ | 266 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████ | 276 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████▎ | 286 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████▊ | 296 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████▏ | 307 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████▌ | 317 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████ | 327 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 337 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 348 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████▏ | 358 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████▌ | 368 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████ | 378 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 389 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 399 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████▏ | 409 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████▋ | 419 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████ | 430 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████▍ | 440 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 450 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████▏ | 460 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 471 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 481 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████▍ | 491 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████▉ | 501 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████▏ | 512 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████▋ | 522 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 532 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 542 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████▉ | 552 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 563 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████▋ | 573 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 583 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 593 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████▉ | 604 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 614 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████▋ | 624 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 634 kB 23.2 MB/s eta 
0:00:01\r\u001b[K |█████████████████████████▌ | 645 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▉ | 655 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▎ | 665 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 675 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 686 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▌ | 696 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▉ | 706 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 716 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▊ | 727 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 737 kB 23.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▌ | 747 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 757 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 768 kB 23.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 778 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 788 kB 23.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 798 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 808 kB 23.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 810 kB 23.2 MB/s \n\u001b[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from fer) (3.2.2)\nRequirement already satisfied: tensorflow>=2.4.0 in /usr/local/lib/python3.7/dist-packages (from fer) (2.5.0)\nCollecting mtcnn>=0.1.1\n Downloading mtcnn-0.1.1-py3-none-any.whl (2.3 MB)\n\u001b[K |████████████████████████████████| 2.3 MB 41.3 MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from fer) (2.23.0)\nRequirement already satisfied: opencv-contrib-python in /usr/local/lib/python3.7/dist-packages (from fer) (4.1.2.30)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from fer) (1.1.5)\nRequirement already satisfied: keras==2.4.3 in /usr/local/lib/python3.7/dist-packages (from fer) (2.4.3)\nRequirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.7/dist-packages (from keras==2.4.3->fer) (1.19.5)\nRequirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras==2.4.3->fer) (3.1.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from keras==2.4.3->fer) (3.13)\nRequirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.7/dist-packages (from keras==2.4.3->fer) (1.4.1)\nRequirement already satisfied: opencv-python>=4.1.0 in /usr/local/lib/python3.7/dist-packages (from mtcnn>=0.1.1->fer) (4.1.2.30)\nRequirement already satisfied: tensorflow-estimator<2.6.0,>=2.5.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (2.5.0)\nRequirement already satisfied: keras-nightly~=2.5.0.dev in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (2.5.0.dev2021032900)\nRequirement already satisfied: gast==0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (0.4.0)\nRequirement already satisfied: six~=1.15.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.15.0)\nRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (3.17.3)\nRequirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.12.1)\nRequirement already satisfied: flatbuffers~=1.12.0 in 
/usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.12)\nRequirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (0.2.0)\nRequirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.6.3)\nRequirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (3.3.0)\nRequirement already satisfied: typing-extensions~=3.7.4 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (3.7.4.3)\nRequirement already satisfied: grpcio~=1.34.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.34.1)\nRequirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.1.2)\nRequirement already satisfied: tensorboard~=2.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (2.5.0)\nRequirement already satisfied: termcolor~=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (1.1.0)\nRequirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (0.36.2)\nRequirement already satisfied: absl-py~=0.10 in /usr/local/lib/python3.7/dist-packages (from tensorflow>=2.4.0->fer) (0.12.0)\nRequirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py->keras==2.4.3->fer) (1.5.2)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (1.0.1)\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (1.32.1)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (0.4.4)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (57.2.0)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (1.8.0)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (3.3.4)\nRequirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.5->tensorflow>=2.4.0->fer) (0.6.1)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow>=2.4.0->fer) (4.2.2)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow>=2.4.0->fer) (0.2.8)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow>=2.4.0->fer) (4.7.2)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow>=2.4.0->fer) (1.3.0)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.5->tensorflow>=2.4.0->fer) (4.6.1)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in 
/usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard~=2.5->tensorflow>=2.4.0->fer) (0.4.8)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->fer) (2021.5.30)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->fer) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->fer) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->fer) (2.10)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow>=2.4.0->fer) (3.1.1)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->markdown>=2.6.8->tensorboard~=2.5->tensorflow>=2.4.0->fer) (3.5.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->fer) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->fer) (0.10.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->fer) (2.8.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->fer) (1.3.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->fer) (2018.9)\nInstalling collected packages: mtcnn, fer\nSuccessfully installed fer-21.0.3 mtcnn-0.1.1\n"
],
[
"from fer import FER\nimport matplotlib.pyplot as plt \n%matplotlib inline\n\ntest_image_one = plt.imread(\"/content/Image-One.jpeg\")\nemo_detector = FER(mtcnn=True)\n# Capture all the emotions on the image\ncaptured_emotions = emo_detector.detect_emotions(test_image_one)\n# Print all captured emotions with the image\nprint(captured_emotions)\nplt.imshow(test_image_one)",
"WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/layers/normalization.py:534: _colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n"
],
[
"# Use the top Emotion() function to call for the dominant emotion in the image\ndominant_emotion, emotion_score = emo_detector.top_emotion(test_image_one)\nprint(dominant_emotion, emotion_score)",
"neutral 0.97\n"
],
[
"# We repeat the same steps for a few other images to confirm the performance of FER()\ntest_image_two = plt.imread(\"/content/Image-Two.jpg\")\ncaptured_emotions_two = emo_detector.detect_emotions(test_image_two)\nprint(captured_emotions_two)\nplt.imshow(test_image_two)\ndominant_emotion_two, emotion_score_two = emo_detector.top_emotion(test_image_two)\nprint(dominant_emotion_two, emotion_score_two)\n",
"[{'box': (30, 48, 110, 110), 'emotions': {'angry': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happy': 0.98, 'sad': 0.0, 'surprise': 0.0, 'neutral': 0.01}}]\nhappy 0.98\n"
],
[
"# Testing on another image\ntest_image_three = plt.imread(\"Image-Three.jpg\")\ncaptured_emotions_three = emo_detector.detect_emotions(test_image_three)\nprint(captured_emotions_three)\nplt.imshow(test_image_three)\ndominant_emotion_three, emotion_score_three = emo_detector.top_emotion(test_image_three)\nprint(dominant_emotion_three, emotion_score_three)\n",
"[{'box': (155, 193, 676, 676), 'emotions': {'angry': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happy': 0.58, 'sad': 0.02, 'surprise': 0.0, 'neutral': 0.4}}]\nhappy 0.58\n"
],
[
"# Testing on another image\ntest_image_four = plt.imread(\"Image-Four.jpg\")\ncaptured_emotions_four = emo_detector.detect_emotions(test_image_four)\nprint(captured_emotions_four)\nplt.imshow(test_image_four)\ndominant_emotion_four, emotion_score_four = emo_detector.top_emotion(test_image_four)\nprint(dominant_emotion_four, emotion_score_four)\n",
"[{'box': (150, 75, 237, 237), 'emotions': {'angry': 0.78, 'disgust': 0.0, 'fear': 0.14, 'happy': 0.0, 'sad': 0.03, 'surprise': 0.01, 'neutral': 0.04}}]\nangry 0.78\n"
],
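[
"# A hypothetical extra step (our addition, not part of the original demo):\n# visualize the bounding box that detect_emotions() returned for the last image.\nimport matplotlib.patches as patches\n\nbox = captured_emotions_four[0]['box']  # (x, y, width, height)\nfig, ax = plt.subplots()\nax.imshow(test_image_four)\nax.add_patch(patches.Rectangle((box[0], box[1]), box[2], box[3], fill=False, edgecolor='red', linewidth=2))\nplt.show()",
"_____no_output_____"
],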
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d36c168491ce1f8cc0d29fefc99c013bba0358 | 13,542 | ipynb | Jupyter Notebook | Intro_vector_and_matrix.ipynb | vietthao2000/pre-program-package-2018-part-2 | 1303b5f4199ab383c2e6574a05771de734d405cf | [
"MIT"
] | null | null | null | Intro_vector_and_matrix.ipynb | vietthao2000/pre-program-package-2018-part-2 | 1303b5f4199ab383c2e6574a05771de734d405cf | [
"MIT"
] | null | null | null | Intro_vector_and_matrix.ipynb | vietthao2000/pre-program-package-2018-part-2 | 1303b5f4199ab383c2e6574a05771de734d405cf | [
"MIT"
] | null | null | null | 13,542 | 13,542 | 0.622065 | [
[
[
"# Bổ trợ bài giảng về Đại số tuyến tính - Phần 1\n## MaSSP 2018, Computer Science\nTài liệu ngắn này đưa ra định nghĩa một số khái niệm cơ bản trong đại số tuyến tính liên quan đến vector và ma trận.\n\n# 1. Một số khái niệm\n## 1.1. Vô hướng (Scalar)\nMột `scalar` là một số bất kì thuộc tập số nào đó.\nKhi định nghĩa một số ta phải chỉ rõ tập số mà nó thuộc vào (gọi là `domain`).\nVí dụ, $ n $ là số tự nhiên sẽ được kí hiệu: $ n \\in \\mathbb{N} $ (Natural numbers),\nhoặc $ x $ là số thực sẽ được kí hiệu: $ x \\in \\mathbb{R} $ (Real numbers).\nTrong Python số tự nhiên có thể là kiểu `int`, số thực có thể là kiểu `float`.\n<!---\nMột số thường có thể định nghĩa được bằng một kiểu dữ liệu nguyên thủy của các ngôn ngữ lập trình.\nNhư số tự nhiên có thể là kiểu `int`, số thực có thể là kiểu `float` trong Python.\n--->",
"_____no_output_____"
]
],
[
[
"x = 1\nprint(type(x))",
"<class 'int'>\n"
],
[
"y = 2.0\nprint(type(y))",
"<class 'float'>\n"
]
],
[
[
"## 1.2. Véc-tơ (Vector)\n`Vector` là 1 mảng của các vô hướng scalars tương tự như mảng 1 chiều trong các ngôn ngữ lập trình.\nCác phần tử trong vector cũng được đánh địa chỉ và có thể truy cập nó qua các địa chỉ tương ứng của nó.\nTrong toán học, một vector có thể là vector cột (`column vector`) nếu các nó được biểu diễn dạng một cột nhiều hàng, hoặc có thể là vector hàng (`row vector`) nếu nó được biểu diễn dưới dạng một hàng của nhiều cột.\n\nMột vector cột có dạng như sau:\n\n$$\nx =\n\\begin{bmatrix}\nx_1 \\\\\nx_2 \\\\\n\\vdots \\\\\nx_n\n\\end{bmatrix}\n$$\n\nMột vector hàng có dạng như sau:\n$$\nx =\n\\begin{bmatrix}\nx_1, &\nx_2, &\n\\cdots &\nx_n\n\\end{bmatrix}\n$$\n\nTrong đó, $ x_1 $, $ x_2 $, ..., $ x_n $ là các phần tử `thứ 1`, `thứ 2`, ... `thứ n` của vector.\nLưu ý trong lập trình Python ta đánh số từ `0`: $x[0] = x_1, x[1] = x_2,...$.",
"_____no_output_____"
],
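[
"A minimal NumPy sketch (an addition to the original notes, assuming `numpy` is installed) of row and column vectors and of Python's 0-based indexing:\n\n```python\nimport numpy as np\n\nx_row = np.array([[1, 2, 3]])      # row vector, shape (1, 3)\nx_col = np.array([[1], [2], [3]])  # column vector, shape (3, 1)\nprint(x_row.shape, x_col.shape)\nprint(x_row[0, 1])                 # the element x_2 sits at index 1 in Python\n```",
"_____no_output_____"
],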
[
"## 1.3. Ma trận (Matrix)\nMa trận là một mảng 2 chiều của các vô hướng tương tự như mảng 2 chiều trong các ngôn ngữ lập trình. Ví dụ dưới đây là một ma trận có $ m $ hàng và $ n $ cột:\n$$\nA =\n\\begin{bmatrix}\nA _{1, 1} & A _{1, 2} & \\cdots & A _{1, n} \\\\\nA _{2, 1} & A _{2, 2} & \\cdots & A _{2, n} \\\\\n\\vdots & \\vdots & \\vdots & \\vdots \\\\\nA _{m, 1} & A _{m, 2} & \\cdots & A _{m, n}\n\\end{bmatrix}\n$$\n\nKhi định nghĩa một ma trận ta cần chỉ rõ số hàng và số cột cùng trường số của các phần tử có nó.\nLúc này, $ mn $ được gọi là cấp của ma trận.\nVí dụ, ma trận số thực $ A $ có m hàng và n cột được kí hiệu là: $ A \\in \\mathbb{R}^{m \\times n} $.\n\nCác phần tử trong ma trận được định danh bằng 2 địa chỉ hàng $ i $ và cột $ j $ tương ứng.\nVí dụ phần tử hàng thứ 3, cột thứ 2 sẽ được kí hiệu là: $ A_{3,2} $.\nTa cũng có thể kí hiệu các phần tử của hàng $ i $ là $ A _{i,:} $ và của cột $ j $ là $ A _{:,j} $.\nNếu bạn để ý thì sẽ thấy $ A _{i,:} $ chính là vector hàng, còn $ A _{:,j} $ là vector cột.\nNhư vậy, vector có thể coi là trường hợp đặt biệt của ma trận với số hàng hoặc số cột là 1.\n\nCác ma trận sẽ được kí hiệu: $ [A _{ij}] _{mn} $, trong đó $ A $ là tên của ma trận;\n$ m, n $ là cấp của ma trận; còn $ A _{ij} $ là các phần tử của ma trận tại hàng $ i $ và cột $ j $.\n\n<!---\nCác vector ta cũng sẽ biểu diễn tương tự.\n vector hàng: $ [x_i]_n $, trong đó $ x $ là tên của vector;\n$ n $ là cấp của vector; $ x_i $ là phần tử của vector tại vị trí $ i $.\n vector cột ta sẽ biểu diễn thông qua phép chuyển vị của vector hàng: $ [x_i]_n ^\\intercal $.\n\nNgoài ra, nếu một ma trận được biểu diễn dưới dạng: $ [A _{1j}] _{1n} $ thì ta cũng sẽ hiểu ngầm luôn nó là vector hàng.\nTương tự, với $ [A _{i1}] _{m1} $ thì ta có thể hiểu ngầm với nhau rằng nó là vector cột.\n--->\n\nMột điểm cần lưu ý nữa là các giá trị $ m, n, i, j $ khi được biểu điễn tường minh dưới dạng số, ta cần phải chèn dấu phẩy `,` vào giữa chúng.\nVí dụ: $ [A _{ij}] _{9,4} $ là ma trận có cấp là `9, 4`. $ A _{5,25} $ là phần tử tại hàng `5` và cột `25`.\nViệc này giúp ta phân biệt được giữa ma trận và vector, nếu không ta sẽ bị nhầm ma trận thành vector.",
"_____no_output_____"
],
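[
"A small NumPy illustration (an addition; note that NumPy indexing starts at 0, so the mathematical element $ A_{3,2} $ is `A[2, 1]` in code):\n\n```python\nimport numpy as np\n\nA = np.array([[2, 1, 9],\n              [4, 5, 9],\n              [8, 0, 5]])\nprint(A.shape)   # (3, 3): m rows, n columns\nprint(A[2, 1])   # element at mathematical row 3, column 2\nprint(A[0, :])   # the row A_{1,:}\nprint(A[:, 2])   # the column A_{:,3}\n```",
"_____no_output_____"
],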
[
"## 1.4. Ten-xơ (Tensor)\nTensor là một mảng nhiều chiều, nó là trường hợp tổng quát của việc biểu diễn số chiều.\nNhư vậy, ma trận có thể coi là một tensor 2 chiều, vector là tensor một nhiều còn scalar là tensor zero chiều.\n\nCác phần tử của một tensor cần được định danh bằng số địa chỉ tương ứng với số chiều của tensor đó. Ví dụ mộ tensor 3 chiều $A$ có phần tử tại hàng $ i $, cột $ j $, cao $ k $ được kí hiệu là $ A_{i,j,k} $.\n<img src=\"https://github.com/vietthao2000/pre-program-package-2018-part-2/blob/master/images/tensor1.png?raw=true\" alt=\"Tensor\" style=\"height: 50%; width: 50%;\"/>\n\nVí dụ ảnh trắng đen hoặc xám (`grayscale`) được biểu diễn bằng ma trận 2 chiều. Giá trị của mỗi phần tử trong ma trận là một số thập phân nằm trong khoảng từ 0 đến 1, ứng với độ đen trắng của từng điểm ảnh (`pixel`) (0 thể hiện màu đen và giá trị càng gần tới 1 thì càng trắng). Do hình ảnh có chiều dài và chiều rộng, ma trận của các điểm ảnh là ma trận 2 chiều.\n<img src=\"https://github.com/vietthao2000/pre-program-package-2018-part-2/blob/master/images/MNIST_2.png?raw=true\" alt=\"grayscale\" style=\"height: 25%; width: 25%;\"/>\n\nMột ảnh màu được biểu diễn bằng một tensor 3 chiều, 2 chiều đầu cũng để đánh số địa chỉ mỗi điểm ảnh dọc theo chiều dài và chiều rộng của ảnh. Chiều cuối cùng để phân biệt 3 màu cơ bản đỏ, xanh lá, xanh dương ($k=1,2,3$). Như vậy mỗi điểm ảnh được xác định bởi vị trí của nó, và thành phần 3 màu cơ bản.\n<img src=\"https://github.com/vietthao2000/pre-program-package-2018-part-2/blob/master/images/tensor2.png?raw=true\" alt=\"color\" style=\"height: 50%; width: 50%;\"/>\n\nVậy đố các bạn biết, một đoạn phim đen trắng sẽ được biểu diễn bằng tensor mấy chiều? Một đoạn phim màu thì sao?\n<img src=\"https://github.com/vietthao2000/pre-program-package-2018-part-2/blob/master/images/tensor4.png?raw=true\" alt=\"video\" style=\"height: 50%; width: 50%;\"/>\n",
"_____no_output_____"
],
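[
"A quick shape check (an addition) for the image examples above; the movie quiz is answered in the comments, so try it yourself first:\n\n```python\nimport numpy as np\n\ngray_image = np.zeros((28, 28))           # 2-D tensor: height x width\ncolor_image = np.zeros((28, 28, 3))       # 3-D tensor: height x width x colors\ngray_movie = np.zeros((100, 28, 28))      # 3-D tensor: frames x height x width\ncolor_movie = np.zeros((100, 28, 28, 3))  # 4-D tensor: frames x height x width x colors\nprint(gray_image.ndim, color_image.ndim, gray_movie.ndim, color_movie.ndim)\n```",
"_____no_output_____"
],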
[
"# 2. Một số ma trận đặc biệt\n## 2.1. Ma trận không (zero matrix)\nMa trận `zero` là ma trận mà tất cả các phần tử của nó đều bằng 0: $ A_{i,j} = 0, \\forall{i,j} $. Ví dụ:\n\n$$\n\\varnothing =\n\\begin{bmatrix}\n0 & 0 & 0 & 0 \\\\\n0 & 0 & 0 & 0 \\\\\n0 & 0 & 0 & 0\n\\end{bmatrix}\n$$\nTa có thể viết $\\bf 0_{m\\times n}$ để chỉ ma trận zero có size $m\\times n$.\n\n## 2.2. Ma trận vuông (square matrix)\nMa trận vuông là ma trận có số hàng bằng với số cột: $ A \\in R^{n \\times n} $.\nVí dụ một ma trận vuông cấp 3 (số hàng và số cột là 3) có dạng như sau:\n\n$$\nA =\n\\begin{bmatrix}\n2 & 1 & 9 \\\\\n4 & 5 & 9 \\\\\n8 & 0 & 5\n\\end{bmatrix}\n$$\n\nVới ma trận vuông, đường chéo bắt đầu từ góc trái trên cùng tới góc phải dưới cùng được gọi là đường chéo chính: $ \\{ A _{i,i} \\} $. Ký hiệu $\\{ \\cdots \\}$ dùng để chỉ một tập hợp (`set`). Trong ví dụ trên, đường chéo chính đi qua các phần tử `2, 5, 5`.",
"_____no_output_____"
],
[
"## 2.3. Ma trận chéo\nMa trận chéo là ma trận vuông có các phần từ nằm ngoài đường chéo chính bằng 0: $ A_{i,j} = 0, \\forall{i \\not = j} $.\nVí dụ ma trận chéo cấp 4 (có 4 hàng và 4 cột) có dạng như sau:\n\n$$\nA =\n\\begin{bmatrix}\n1 & 0 & 0 & 0 \\\\\n0 & 2 & 0 & 0 \\\\\n0 & 0 & 3 & 0 \\\\\n0 & 0 & 0 & 4\n\\end{bmatrix}\n$$\n\n> Lưu ý rằng ma trận vuông zero (ma trận vuông có các phần tử bằng 0) cũng là một ma trận chéo, ký hiệu $\\bf 0_n$.",
"_____no_output_____"
],
[
"## 2.4. Ma trận đơn vị\nLà ma trận chéo có các phần tử trên đường chéo bằng 1:\n$$\n\\begin{cases}\nA _{i,j} = 0, \\forall{i \\not = j} \\\\\nA _{i,j} = 1, \\forall{i = j}\n\\end{cases}\n$$\n\nMa trận đơn vị được kí hiệu là $ I_n $ với $ n $ là cấp của ma trận. Ví dụ ma trận đơn vị cấp 3 được biểu diễn như sau:\n\n$$\nI_{3} =\n\\begin{bmatrix}\n1 & 0 & 0 \\\\\n0 & 1 & 0 \\\\\n0 & 0 & 1\n\\end{bmatrix}\n$$\n\n<!--- đã nói ở phần định nghĩa ma trận\n## 2.5. Ma trận cột\nMa trận cột chính là vector cột, tức là ma trận chỉ có 1 cột.\n\n## 2.6. Ma trận hàng\nTương tự như ma trận cột, ma trận hàng chính là vector hàng, tức là ma trận chỉ có 1 hàng.\n--->",
"_____no_output_____"
],
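[
"A NumPy check (an addition) for diagonal and identity matrices:\n\n```python\nimport numpy as np\n\nD = np.diag([1, 2, 3, 4])  # diagonal matrix of order 4\nI3 = np.eye(3)             # identity matrix I_3\nprint(D)\nprint(I3)\n```",
"_____no_output_____"
],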
[
"## 2.5. Ma trận chuyển vị\nMa trận chuyển vị là ma trận nhận được sau khi ta đổi hàng thành cột và cột thành hàng.\n\n$$\n\\begin{cases}\nA \\in \\mathbb{R}^{m\\times n} \\\\\nB \\in \\mathbb{R}^{n\\times m} \\\\\nA _{i,j} = B _{j,i}, \\forall{i,j}\n\\end{cases}\n$$\n\nMa trận chuyển vị của $ A $ được kí hiệu là $ A^\\intercal $. Như vậy: $ (A^\\intercal)_{i,j} = A _{j,i} $.\n\n$$\n \\begin{bmatrix}\n 1 & 2 & 3 \\\\\n 10 & 15 & 20\n \\end{bmatrix}\n^\\intercal =\n \\begin{bmatrix}\n 1 & 10 \\\\\n 2 & 15 \\\\\n 3 & 20\n \\end{bmatrix}\n$$\n\nVector cũng là một ma trận nên mọi phép toán với ma trận đều có thể áp dụng được cho vector, bao gồm cả phép chuyển vị ma trận.\nSử dụng phép chuyển vị ta có thể biến một vector hàng thành vector cột và ngược lại.\n\nMặc định (`by default, convention`) trong toán học khi cho một vector $x\\in\\mathbb{R}^n$ ta hiểu đây là một vector cột. Đôi lúc để viết cho ngắn gọi người ta thường sử dụng phép chuyển vị để định nghĩa vector cột, ví dụ $ x = [x_1, x_2, ..., x_n]^\\intercal $.\n\n<!---Do đó ở ví dụ về vector hàng, theo chuẩn ta nên viết $x^{\\top} =\n\\begin{bmatrix}\nx_1, &\nx_2, &\n\\cdots &\nx_n\n\\end{bmatrix}$. --->\n<!---\n# 3. Các kí hiệu\nĐể thuận tiện, từ nay về sau tôi sẽ mặc định các vô hướng, phần tử của ma trận (bao gồm cả vector) mà chúng ta làm việc là thuộc trường số thực $ \\mathbb{R} $. Tôi cũng sẽ sử dụng một số kí hiệu bổ sung như dưới đây.\n\nCác ma trận sẽ được kí hiệu: $ [A _{ij}] _{mn} $, trong đó $ A $ là tên của ma trận;\n$ m, n $ là cấp của ma trận; còn $ A _{ij} $ là các phần tử của ma trận tại hàng $ i $ và cột $ j $.\n\nCác vector ta cũng sẽ biểu diễn tương tự.\n vector hàng: $ [x_i]_n $, trong đó $ x $ là tên của vector;\n$ n $ là cấp của vector; $ x_i $ là phần tử của vector tại vị trí $ i $.\n vector cột ta sẽ biểu diễn thông qua phép chuyển vị của vector hàng: $ [x_i]_n ^\\intercal $.\n\nNgoài ra, nếu một ma trận được biểu diễn dưới dạng: $ [A _{1j}] _{1n} $ thì ta cũng sẽ hiểu ngầm luôn nó là vector hàng.\nTương tự, với $ [A _{i1}] _{m1} $ thì ta có thể hiểu ngầm với nhau rằng nó là vector cột.\n\nMột điểm cần lưu ý nữa là các giá trị $ m, n, i, j $ khi được biểu điễn tường minh dưới dạng số,\nta cần phải chèn dấu phẩy `,` vào giữa chúng.\nVí dụ: $ [A _{ij}] _{9,4} $ là ma trận có cấp là `9, 4`. $ A _{5,25} $ là phần tử tại hàng `5` và cột `25`.\nViệc này giúp ta phân biệt được giữa ma trận và vector, nếu không ta sẽ bị nhầm ma trận thành vector.\n\nTrên đây là một số khái niệm cơ bản để làm việc với ma trận, trong phần sau tôi sẽ đề cập tới các phép toán của ma trận.\nViệc biến đổi ma trận và các phép toán trên ma trận là rất cần thiết để làm việc với các bài toán về học máy sau này.\n--->",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0d37922e3538861c7fa9fed388cd4f51c4ea9e5 | 10,484 | ipynb | Jupyter Notebook | Code/Dataset Segmentation script.ipynb | mirtanvirislam/Deep-Learning-Based-Glaucoma-Detection-with-Cropped-Optic-Cup-and-Disc-and-Blood-Vessel-Segmentation | c7c6fa4c9c3a7ccd16dc8c4bacdd241446013bdf | [
"MIT"
] | null | null | null | Code/Dataset Segmentation script.ipynb | mirtanvirislam/Deep-Learning-Based-Glaucoma-Detection-with-Cropped-Optic-Cup-and-Disc-and-Blood-Vessel-Segmentation | c7c6fa4c9c3a7ccd16dc8c4bacdd241446013bdf | [
"MIT"
] | null | null | null | Code/Dataset Segmentation script.ipynb | mirtanvirislam/Deep-Learning-Based-Glaucoma-Detection-with-Cropped-Optic-Cup-and-Disc-and-Blood-Vessel-Segmentation | c7c6fa4c9c3a7ccd16dc8c4bacdd241446013bdf | [
"MIT"
] | 1 | 2022-03-25T05:27:41.000Z | 2022-03-25T05:27:41.000Z | 5,242 | 10,483 | 0.707077 | [
[
[
"from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)",
"_____no_output_____"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"!unzip -qq '/content/drive/My Drive/Colab Notebooks/Glaucoma detection/Data/BEH.zip'",
"_____no_output_____"
],
[
"!pip install git+https://github.com/karolzak/keras-unet\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport glob\nimport os\nimport sys\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport torchvision.models as models\nfrom torchvision.utils import make_grid\nfrom torch.utils.data import Dataset, random_split, DataLoader\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\nimport random\nfrom torchvision.utils import make_grid\nfrom PIL import Image\nfrom keras_unet.utils import plot_imgs\nfrom sklearn.model_selection import train_test_split\nfrom keras_unet.models import custom_unet\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam, SGD\nfrom keras_unet.metrics import iou, iou_thresholded\nfrom keras_unet.losses import jaccard_distance\nfrom keras_unet.utils import plot_imgs, plot_segm_history",
"_____no_output_____"
],
[
"# Load FAU dataset\n\norgs = glob.glob(\"/content/FAU/training/original/*\")\nmasks = glob.glob(\"/content/FAU/training/mask/*\")\nsize = 512\n\nimgs_list = []\nmasks_list = []\nfor image, mask in zip(orgs, masks):\n imgs_list.append(np.array(Image.open(image).resize((size,size)))[:,:,1])\n im = Image.open(mask).resize((512,512))\n masks_list.append(np.array(im))\n\nimgs_np = np.asarray(imgs_list)\nmasks_np = np.asarray(masks_list)\n\nprint('Original Images:', imgs_np.shape, ' Ground Truth images:', masks_np.shape)\n# plot_imgs(org_imgs=imgs_np, mask_imgs=masks_np, nm_img_to_plot=10, figsize=6)",
"_____no_output_____"
],
[
"dataset_glaucoma = glob.glob(\"/content/BEH/Train/glaucoma/*.jpg\")\ndataset_normal = glob.glob(\"/content/BEH/Train/normal/*.jpg\")\n\ndataset = []\nfor image in dataset_glaucoma:\n dataset.append(np.array(Image.open(image).resize((size,size)))[:,:,1])\nfor image in dataset_normal:\n dataset.append(np.array(Image.open(image).resize((size,size)))[:,:,1])\n\ndataset_np = np.asarray(dataset)\ndataset_x = np.asarray(dataset_np, dtype=np.float32)/255\ndataset_x = dataset_x.reshape(dataset_x.shape[0], dataset_x.shape[1], dataset_x.shape[2], 1)\nprint('Dataset:', dataset_x.shape)\nplot_imgs(org_imgs=dataset_np, mask_imgs=masks_np, nm_img_to_plot=10, figsize=6)",
"_____no_output_____"
],
[
"# Get data into correct shape, dtype and range (0.0-1.0)\nprint(imgs_np.max(), masks_np.max())\nx = np.asarray(imgs_np, dtype=np.float32)/255\ny = np.asarray(masks_np, dtype=np.float32)/255\nprint(x.max(), y.max())\nprint(x.shape, y.shape)\ny = y.reshape(y.shape[0], y.shape[1], y.shape[2], 1)\nx = x.reshape(x.shape[0], x.shape[1], x.shape[2], 1)\nprint(x.shape, y.shape)\n\nx_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.1, random_state=0)\n\nprint(\"x_train: \", x_train.shape)\nprint(\"y_train: \", y_train.shape)\nprint(\"x_val: \", x_val.shape)\nprint(\"y_val: \", y_val.shape)\n\nfrom keras_unet.utils import get_augmented\n\ntrain_gen = get_augmented(\n x_train, y_train, batch_size=8,\n data_gen_args = dict(\n rotation_range=5.,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=40,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode='constant'\n ))\n\nsample_batch = next(train_gen)\nxx, yy = sample_batch\nprint(xx.shape, yy.shape)\nfrom keras_unet.utils import plot_imgs\n\n# Plot Dataset and Masks\nplot_imgs(org_imgs=xx, mask_imgs=yy, nm_img_to_plot=3, figsize=6)\n\n# Initialize network\ninput_shape = x_train[0].shape\n\nmodel = custom_unet(\n input_shape,\n filters=32,\n use_batch_norm=True,\n dropout=0.3,\n dropout_change_per_layer=0.0,\n num_layers=4\n)\n\nmodel_filename = 'segm_model_v3.h5'\ncallback_checkpoint = ModelCheckpoint(\n model_filename, \n verbose=1, \n monitor='val_loss', \n save_best_only=True,\n)\n\nmodel.compile(\n optimizer=Adam(), \n # optimizer=SGD(lr=0.01, momentum=0.99),\n loss='binary_crossentropy',\n #loss=jaccard_distance,\n metrics=[iou, iou_thresholded]\n)\n",
"_____no_output_____"
],
[
"history = model.fit_generator(\n train_gen,\n steps_per_epoch=200,\n epochs=3,\n validation_data=(x_val, y_val),\n callbacks=[callback_checkpoint]\n)",
"_____no_output_____"
],
[
"plot_segm_history(history)",
"_____no_output_____"
],
[
"# Segment Training data\nmodel.load_weights(model_filename)\ny_pred = model.predict(x_val)\ny_pred = np.moveaxis(y_pred, -1, 1)\nplot_imgs(org_imgs=x_val, mask_imgs=y_val, pred_imgs=y_pred, nm_img_to_plot=8)\n\n# Segment dataset\ndataset_y_pred = model.predict(dataset_x)\nplot_imgs(org_imgs=dataset_x, mask_imgs=dataset_y_pred, pred_imgs=dataset_y_pred, nm_img_to_plot=8)\ndataset_x = np.moveaxis(dataset_x, -1, 1)\ndataset_y_pred = np.moveaxis(dataset_y_pred, -1, 1)\nprint(dataset_x.shape, dataset_y_pred.shape)\n\nimport torch\nx = torch.Tensor(dataset_y_pred)",
"_____no_output_____"
],
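[
"# A minimal follow-up sketch (our addition, not in the original script): binarize\n# the soft predicted masks at 0.5 (a common but arbitrary threshold) to inspect\n# hard segmentation maps before saving.\nbinary_masks = (dataset_y_pred > 0.5).astype(np.float32)\nprint(binary_masks.shape, binary_masks.min(), binary_masks.max())",
"_____no_output_____"
],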
[
"from torchvision.utils import save_image\nfrom pathlib import Path\n\nfor i in range(len(dataset_glaucoma)):\n output = x[i][0]\n out_dir = Path('/content/ORIGA_af/glaucoma')\n out_filename = str(i) + '_BEH.jpg'\n output_name = out_dir.joinpath(out_filename)\n save_image(output, output_name, padding=0)\n\nfor i in range(len(dataset_glaucoma), len(x)):\n output = x[i][0]\n out_dir = Path('/content/ORIGA_af/normal')\n out_filename = str(i) + '_BEH.jpg'\n output_name = out_dir.joinpath(out_filename)\n save_image(output, output_name, padding=0)",
"_____no_output_____"
],
[
"# Zip segmented dataset\n!zip -r -j BEH '/content/BEH/'",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d37e550b09787c27c1189de44eadd274a1286d | 11,549 | ipynb | Jupyter Notebook | notebooks/hacker_news_demo.ipynb | thundercomb/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 1,416 | 2020-05-18T15:41:34.000Z | 2022-03-31T09:39:55.000Z | notebooks/hacker_news_demo.ipynb | thundercomb/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 165 | 2020-05-19T00:14:32.000Z | 2022-03-30T16:56:36.000Z | notebooks/hacker_news_demo.ipynb | thundercomb/aitextgen | 6e07361a2f15de7925a3cdefc91d32390b7489c9 | [
"MIT"
] | 156 | 2020-05-18T16:28:17.000Z | 2022-03-24T14:50:31.000Z | 26.128959 | 120 | 0.546714 | [
[
[
"# Hacker News aitextgen\n\nA demo on how aitextgen can be used to create bespoke Hacker News submission titles.\n\n**NOTE**: This is released as a proof of concept for mini-GPT-2 models; quality of titles may vary.",
"_____no_output_____"
]
],
[
[
"from aitextgen import aitextgen",
"_____no_output_____"
]
],
[
[
"## Loading the Hacker News Model\n\nThe `minimaxir/hacker-news` model was finetuned on HN submissions up until May 12th with atleast 5 points.\n\nIt uses a custom GPT-2 architecture that is only 30 MB on disk (compared to 124M GPT-2's 500MB on disk.)\n\nRunning the cell will download the model and cache it into `/aitextgen`.",
"_____no_output_____"
]
],
[
[
"ai = aitextgen(model=\"minimaxir/hacker-news\")",
"INFO:aitextgen:Loading minimaxir/hacker-news model from /aitextgen.\n"
]
],
[
[
"## Generation\n\nSince the model is so small, generation happens almost immediately, even in bulk.",
"_____no_output_____"
]
],
[
[
"ai.generate()",
"Kinect can now centralize cellphone locations, not their pictures\n"
],
[
"ai.generate(5)",
"Ask HN: Should I start writing a blog post in Python?\n==========\nThe Psychology of Human Misjudgment (2012)\n==========\nNew York' New Year: $99 Linux PC\n==========\nC++11/12 Released\n==========\nDynamic types in Go\n"
]
],
[
[
"## Prompted Input\n\nYou can seed input with a `prompt` to get specific types of HN posts. The prompt will be **bolded** in the output.",
"_____no_output_____"
]
],
[
[
"ai.generate(5, prompt=\"Ask HN\")",
"\u001b[1mAsk HN\u001b[0m: What are some good (O'Reilly eval) books for a new web-based project?\n==========\n\u001b[1mAsk HN\u001b[0m: How to avoid the Huawei of 20k job candidates?\n==========\n\u001b[1mAsk HN\u001b[0m: How to grow your startup\n==========\n\u001b[1mAsk HN\u001b[0m: What's the best way to learn a new languages on your website?\n==========\n\u001b[1mAsk HN\u001b[0m: How to get started in Machine Learning?\n"
],
[
"ai.generate(5, prompt=\"Show HN\")",
"\u001b[1mShow HN\u001b[0m: The Penetration Tester\n==========\n\u001b[1mShow HN\u001b[0m: qVD.S.Next Windows Awesomeness\n==========\n\u001b[1mShow HN\u001b[0m: My Startup – a crowdfunded satellite news aggregator\n==========\n\u001b[1mShow HN\u001b[0m: The JavaScript Way to Learn JavaScript Within the Web\n==========\n\u001b[1mShow HN\u001b[0m: Hacker News like / you read the message\n"
],
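[
"# A hedged variation (our addition): aitextgen's generate() accepts sampling\n# kwargs such as temperature (per the aitextgen docs); lower values should give\n# more conservative titles.\nai.generate(3, prompt=\"Show HN\", temperature=0.7)",
"_____no_output_____"
],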
[
"ai.generate(5, prompt=\"Elon Musk\")",
"\u001b[1mElon Musk\u001b[0m Says Tesla Is a Wireless Carrier Has Been Laying Off\n==========\n\u001b[1mElon Musk\u001b[0m’s Family Secretary of Munich Is the New Model 3\n==========\n\u001b[1mElon Musk\u001b[0m is a suitable person to learn the originally good\n==========\n\u001b[1mElon Musk\u001b[0m's Hyperloop Is a Success\n==========\n\u001b[1mElon Musk\u001b[0m’s New Nexus Program\n"
],
[
"ai.generate(5, prompt=\"Google says\")",
"\u001b[1mGoogle says\u001b[0m its employees are working with Amazon and Apple\n==========\n\u001b[1mGoogle says\u001b[0m it’s peaked\n==========\n\u001b[1mGoogle says\u001b[0m it is flea banning visible to people who worked in U.S.\n==========\n\u001b[1mGoogle says\u001b[0m it will not allow enemy mine to secure sensitive information\n==========\n\u001b[1mGoogle says\u001b[0m no to Google for Java\n"
]
],
[
[
"## Bulk Generation to File\n\nYou can use `generate_to_file()` to create many HN titles.",
"_____no_output_____"
]
],
[
[
"ai.generate_to_file(1000, batch_size=20)",
"INFO:aitextgen:Generating 1,000 texts to ATG_20200517_235441_14821584.txt\n"
]
],
[
[
"# MIT License\n\nCopyright (c) 2020 Max Woolf\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d387cf792ccef8bbcb597c9de0f0a2d30fd9ff | 50,916 | ipynb | Jupyter Notebook | doc/pub/week1/ipynb/.ipynb_checkpoints/week1-checkpoint.ipynb | Schoyen/ComputationalPhysics2 | 9cf10ffb2557cc73c4e6bab060d53690ee39426f | [
"CC0-1.0"
] | 87 | 2015-01-21T08:29:56.000Z | 2022-03-28T07:11:53.000Z | doc/pub/week1/ipynb/.ipynb_checkpoints/week1-checkpoint.ipynb | Schoyen/ComputationalPhysics2 | 9cf10ffb2557cc73c4e6bab060d53690ee39426f | [
"CC0-1.0"
] | 3 | 2020-01-18T10:43:38.000Z | 2020-02-08T13:15:42.000Z | doc/pub/week1/ipynb/.ipynb_checkpoints/week1-checkpoint.ipynb | Schoyen/ComputationalPhysics2 | 9cf10ffb2557cc73c4e6bab060d53690ee39426f | [
"CC0-1.0"
] | 54 | 2015-02-09T10:02:00.000Z | 2022-03-07T10:44:14.000Z | 36.895652 | 1,765 | 0.580878 | [
[
[
"<!-- dom:TITLE: Week 2 January 11-15: Introduction to the course and start Variational Monte Carlo -->\n# Week 2 January 11-15: Introduction to the course and start Variational Monte Carlo\n<!-- dom:AUTHOR: Morten Hjorth-Jensen Email [email protected] at Department of Physics and Center fo Computing in Science Education, University of Oslo, Oslo, Norway & Department of Physics and Astronomy and Facility for Rare Ion Beams, Michigan State University, East Lansing, Michigan, USA -->\n<!-- Author: --> \n**Morten Hjorth-Jensen Email [email protected]**, Department of Physics and Center fo Computing in Science Education, University of Oslo, Oslo, Norway and Department of Physics and Astronomy and Facility for Rare Ion Beams, Michigan State University, East Lansing, Michigan, USA\n\nDate: **Jan 14, 2021**\n\nCopyright 1999-2021, Morten Hjorth-Jensen Email [email protected]. Released under CC Attribution-NonCommercial 4.0 license\n\n\n\n\n## Overview of week 2\n**Topics.**\n\n* Introduction to the course and overview of topics to be covered\n\n* Introduction to Variational Monte Carlo methods, Metropolis Algorithm, statistics and Markov Chain theory\n\n\n\n**Teaching Material, videos and written material.**\n\n* Asynchronuous vidoes\n\n* Lecture notes and reading assignments\n\n* Additional (often recommended) background material\n\n\n\n## Textbook\n\nThere are no unique textbooks which cover the material to be discussed. For each week however, we will, in addition to our own lecture notes, send links to additional literature. This can be articles or chapters from other textbooks.\nA useful textbook is however \n\n* [Bernd A. Berg, *Markov Chain Monte Carlo Simulations and their Statistical Analysis*, World Scientific, 2004](https://www.worldscientific.com/worldscibooks/10.1142/5602), chapters 1, 2\n\nThis book has its main focus on spin-models, but many of the concepts are general. Chapters 1 and 2 contain a good discussion of the statistical foundation. \n\n## Aims\n* Be able to apply central many-particle methods like the Variational Monte Carlo method to properties of many-fermion systems and many-boson systems.\n\n* Understand how to simulate quantum mechanical systems with many interacting particles. The methods are relevant for atomic, molecular, solid state, materials science, nanotechnology, quantum chemistry and nuclear physics. \n\n* Learn to manage and structure larger projects, with unit tests, object orientation and writing clean code\n\n* Learn about a proper statistical analysis of large data sets\n\n* Learn to optimize with convex optimization methods functions that depend on many variables.\n\n* Parallelization and code optimizations\n\n\n\n\n\n## Lectures and ComputerLab\n\n * Lectures: Thursday (2.15pm-4pm). First time January 14. Last lecture May 6.\n\n * Computerlab: Thursday (4.15pm-7pm), first time January 14, last lab session May 6.\n\n * Weekly plans and all other information are on the webpage of the course\n\n * **First project to be handed in March 26**.\n\n * **Second and final project to be handed in May 31.**\n\n * There is no final exam, only project work.\n\n\n\n## Course Format\n\n * Two compulsory projects. Electronic reports only. You are free to choose your format. We use devilry to hand in the projects.\n\n * Evaluation and grading: The two projects count 1/2 each of the final mark. No exam.\n\n * The computer lab (room 397 in the Physics buidling) has no PCs, so please bring your own laptops. 
C/C++ is the default programming language, but programming languages like Fortran2008, Rust, Julia, and/or Python can also be used. All source codes discussed during the lectures can be found at the webpage of the course.\n\n\n\n\n## Topics covered in this course\n * Parallelization (MPI and OpenMP), high-performance computing topics. Choose between Python, Fortran2008 and/or C++ as programming languages. \n\n * Algorithms for Monte Carlo Simulations (multidimensional integrals), Metropolis-Hastings and importance sampling algorithms. Improved Monte Carlo methods.\n\n * Statistical analysis of data from Monte Carlo calculations, bootstrapping, jackknife and blocking methods. \n\n * Eigenvalue solvers\n\n * For project 2 there will be at least three variants:\n\na. Variational Monte Carlo for fermions\n\nb. Hartree-Fock theory for fermions\n\nc. Coupled cluster theory for fermions (iterative methods)\n\nd. Neural networks and Machine Learning to solve the same problems as in project 1\n\ne. Eigenvalue problems with deep learning methods\n\nf. Possible project on quantum computing\n\n\n\n## Topics covered in this course\n * Search for minima in multidimensional spaces (conjugate gradient method, steepest descent method, quasi-Newton-Raphson, Broyden-Jacobian). Convex optimization, gradient methods\n\n * Iterative methods for solutions of non-linear equations.\n\n * Object orientation\n\n * Data analysis and resampling techniques\n\n * Variational Monte Carlo (VMC) for 'ab initio' studies of quantum mechanical many-body systems.\n\n * Simulation of two- and three-dimensional systems like quantum dots or atoms and molecules or systems from solid state physics\n\n * **Simulation of trapped bosons using VMC (project 1, default)**\n\n * **Machine learning and neural networks (project 2, default, same system as in project 1)**\n\n * Extension of project 1 to fermionic systems (project 2)\n\n * Coupled cluster theory (project 2, depends on interest)\n\n * Other quantum-mechanical methods and systems can be tailored to one's interests (Hartree-Fock Theory, Many-body perturbation theory, time-dependent theories and more).\n\n\n\n\n\n## Quantum Monte Carlo Motivation\n\nMost quantum mechanical problems of interest in for example atomic, molecular, nuclear and solid state \nphysics consist of a large number of interacting electrons and ions or nucleons. \n\nThe total number of particles $N$ is usually sufficiently large\nthat an exact solution cannot be found. \n\nTypically, \nthe expectation value for a chosen hamiltonian for a system of $N$ particles is",
"_____no_output_____"
],
[
"$$\n\\langle H \\rangle =\n \\frac{\\int d\\boldsymbol{R}_1d\\boldsymbol{R}_2\\dots d\\boldsymbol{R}_N\n \\Psi^{\\ast}(\\boldsymbol{R_1},\\boldsymbol{R}_2,\\dots,\\boldsymbol{R}_N)\n H(\\boldsymbol{R_1},\\boldsymbol{R}_2,\\dots,\\boldsymbol{R}_N)\n \\Psi(\\boldsymbol{R_1},\\boldsymbol{R}_2,\\dots,\\boldsymbol{R}_N)}\n {\\int d\\boldsymbol{R}_1d\\boldsymbol{R}_2\\dots d\\boldsymbol{R}_N\n \\Psi^{\\ast}(\\boldsymbol{R_1},\\boldsymbol{R}_2,\\dots,\\boldsymbol{R}_N)\n \\Psi(\\boldsymbol{R_1},\\boldsymbol{R}_2,\\dots,\\boldsymbol{R}_N)},\n$$",
"_____no_output_____"
],
[
"an in general intractable problem.\n\n This integral is actually the starting point in a Variational Monte Carlo calculation. **Gaussian quadrature: Forget it**! Given 10 particles and 10 mesh points for each degree of freedom\nand an\n ideal 1 Tflops machine (all operations take the same time), how long will it take to compute the above integral? The lifetime of the universe is of the order of $10^{17}$ s.\n\n\n\n\n## Quantum Monte Carlo Motivation\nAs an example from the nuclear many-body problem, we have Schroedinger's equation as a differential equation",
"_____no_output_____"
],
[
"$$\n\\hat{H}\\Psi(\\boldsymbol{r}_1,..,\\boldsymbol{r}_A,\\alpha_1,..,\\alpha_A)=E\\Psi(\\boldsymbol{r}_1,..,\\boldsymbol{r}_A,\\alpha_1,..,\\alpha_A)\n$$",
"_____no_output_____"
],
[
"where",
"_____no_output_____"
],
[
"$$\n\\boldsymbol{r}_1,..,\\boldsymbol{r}_A,\n$$",
"_____no_output_____"
],
[
"are the coordinates and",
"_____no_output_____"
],
[
"$$\n\\alpha_1,..,\\alpha_A,\n$$",
"_____no_output_____"
],
[
"are sets of relevant quantum numbers such as spin and isospin for a system of $A$ nucleons ($A=N+Z$, $N$ being the number of neutrons and $Z$ the number of protons).\n\n\n\n\n## Quantum Monte Carlo Motivation\nThere are",
"_____no_output_____"
],
[
"$$\n2^A\\times \\left(\\begin{array}{c} A\\\\ Z\\end{array}\\right)\n$$",
"_____no_output_____"
],
[
"coupled second-order differential equations in $3A$ dimensions.\n\nFor a nucleus like beryllium-10 this number is **215040**.\nThis is a truely challenging many-body problem.\n\nMethods like partial differential equations can at most be used for 2-3 particles.\n\n\n\n\n## Various many-body methods\n* Monte-Carlo methods\n\n* Renormalization group (RG) methods, in particular density matrix RG\n\n* Large-scale diagonalization (Iterative methods, Lanczo's method, dimensionalities $10^{10}$ states)\n\n* Coupled cluster theory, favoured method in quantum chemistry, molecular and atomic physics. Applications to ab initio calculations in nuclear physics as well for large nuclei.\n\n* Perturbative many-body methods \n\n* Green's function methods\n\n* Density functional theory/Mean-field theory and Hartree-Fock theory\n\nThe physics of the system hints at which many-body methods to use.\n\n\n\n\n\n## Quantum Monte Carlo Motivation\n**Pros and Cons of Monte Carlo.**\n\n* Is physically intuitive.\n\n* Allows one to study systems with many degrees of freedom. Diffusion Monte Carlo (DMC) and Green's function Monte Carlo (GFMC) yield in principle the exact solution to Schroedinger's equation.\n\n* Variational Monte Carlo (VMC) is easy to implement but needs a reliable trial wave function, can be difficult to obtain. This is where we will use Hartree-Fock theory to construct an optimal basis.\n\n* DMC/GFMC for fermions (spin with half-integer values, electrons, baryons, neutrinos, quarks) has a sign problem. Nature prefers an anti-symmetric wave function. PDF in this case given distribution of random walkers.\n\n* The solution has a statistical error, which can be large. \n\n* There is a limit for how large systems one can study, DMC needs a huge number of random walkers in order to achieve stable results. \n\n* Obtain only the lowest-lying states with a given symmetry. Can get excited states with extra labor.\n\n\n\n\n\n## Quantum Monte Carlo Motivation\n**Where and why do we use Monte Carlo Methods in Quantum Physics.**\n\n* Quantum systems with many particles at finite temperature: Path Integral Monte Carlo with applications to dense matter and quantum liquids (phase transitions from normal fluid to superfluid). Strong correlations.\n\n* Bose-Einstein condensation of dilute gases, method transition from non-linear PDE to Diffusion Monte Carlo as density increases.\n\n* Light atoms, molecules, solids and nuclei. \n\n* Lattice Quantum-Chromo Dynamics. Impossible to solve without MC calculations. \n\n* Simulations of systems in solid state physics, from semiconductors to spin systems. Many electrons active and possibly strong correlations.\n\n\n\n## Quantum Monte Carlo Motivation\nWe start with the variational principle.\nGiven a hamiltonian $H$ and a trial wave function $\\Psi_T$, the variational principle states that the expectation value of $\\langle H \\rangle$, defined through",
"_____no_output_____"
],
[
"$$\nE[H]= \\langle H \\rangle =\n \\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})H(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})},\n$$",
"_____no_output_____"
],
[
"is an upper bound to the ground state energy $E_0$ of the hamiltonian $H$, that is",
"_____no_output_____"
],
[
"$$\nE_0 \\le \\langle H \\rangle .\n$$",
"_____no_output_____"
],
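[
"The upper-bound property is easy to verify numerically. The following small sketch (an addition to these notes, not part of the original text) mixes two eigenstates of a toy hamiltonian with eigenvalues $E_0=-1$ and $E_1=1$ and checks that the energy expectation value never falls below $E_0$:\n\n```python\nimport numpy as np\n\nE = np.array([-1.0, 1.0])       # eigenvalues of a toy two-level hamiltonian\nrng = np.random.default_rng(0)\nfor _ in range(5):\n    a = rng.normal(size=2)      # random real amplitudes a_n\n    w = a**2/np.sum(a**2)       # normalized weights |a_n|^2\n    print(np.sum(w*E) >= E[0])  # always True: the mean energy is bounded below by E_0\n```",
"_____no_output_____"
],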
[
"In general, the integrals involved in the calculation of various expectation values are multi-dimensional ones. Traditional integration methods such as the Gauss-Legendre will not be adequate for say the computation of the energy of a many-body system.\n\n\n\n## Quantum Monte Carlo Motivation\nThe trial wave function can be expanded in the eigenstates of the hamiltonian since they form a complete set, viz.,",
"_____no_output_____"
],
[
"$$\n\\Psi_T(\\boldsymbol{R})=\\sum_i a_i\\Psi_i(\\boldsymbol{R}),\n$$",
"_____no_output_____"
],
[
"and assuming the set of eigenfunctions to be normalized one obtains",
"_____no_output_____"
],
[
"$$\n\\frac{\\sum_{nm}a^*_ma_n \\int d\\boldsymbol{R}\\Psi^{\\ast}_m(\\boldsymbol{R})H(\\boldsymbol{R})\\Psi_n(\\boldsymbol{R})}\n {\\sum_{nm}a^*_ma_n \\int d\\boldsymbol{R}\\Psi^{\\ast}_m(\\boldsymbol{R})\\Psi_n(\\boldsymbol{R})} =\\frac{\\sum_{n}a^2_n E_n}\n {\\sum_{n}a^2_n} \\ge E_0,\n$$",
"_____no_output_____"
],
[
"where we used that $H(\\boldsymbol{R})\\Psi_n(\\boldsymbol{R})=E_n\\Psi_n(\\boldsymbol{R})$.\nIn general, the integrals involved in the calculation of various expectation\nvalues are multi-dimensional ones. \nThe variational principle yields the lowest state of a given symmetry.\n\n\n\n\n## Quantum Monte Carlo Motivation\nIn most cases, a wave function has only small values in large parts of \nconfiguration space, and a straightforward procedure which uses\nhomogenously distributed random points in configuration space \nwill most likely lead to poor results. This may suggest that some kind\nof importance sampling combined with e.g., the Metropolis algorithm \nmay be a more efficient way of obtaining the ground state energy.\nThe hope is then that those regions of configurations space where\nthe wave function assumes appreciable values are sampled more \nefficiently.\n\n\n\n\n## Quantum Monte Carlo Motivation\nThe tedious part in a VMC calculation is the search for the variational\nminimum. A good knowledge of the system is required in order to carry out\nreasonable VMC calculations. This is not always the case, \nand often VMC calculations \nserve rather as the starting\npoint for so-called diffusion Monte Carlo calculations (DMC). DMC is a way of\nsolving exactly the many-body Schroedinger equation by means of \na stochastic procedure. A good guess on the binding energy\nand its wave function is however necessary. \nA carefully performed VMC calculation can aid in this context.\n\n\n\n\n## Quantum Monte Carlo Motivation\n* Construct first a trial wave function $\\psi_T(\\boldsymbol{R},\\boldsymbol{\\alpha})$, for a many-body system consisting of $N$ particles located at positions $\\boldsymbol{R}=(\\boldsymbol{R}_1,\\dots ,\\boldsymbol{R}_N)$. The trial wave function depends on $\\alpha$ variational parameters $\\boldsymbol{\\alpha}=(\\alpha_1,\\dots ,\\alpha_M)$.\n\n* Then we evaluate the expectation value of the hamiltonian $H$",
"_____no_output_____"
],
[
"$$\nE[H]=\\langle H \\rangle =\n \\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_{T}(\\boldsymbol{R},\\boldsymbol{\\alpha})H(\\boldsymbol{R})\\Psi_{T}(\\boldsymbol{R},\\boldsymbol{\\alpha})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_{T}(\\boldsymbol{R},\\boldsymbol{\\alpha})\\Psi_{T}(\\boldsymbol{R},\\boldsymbol{\\alpha})}.\n$$",
"_____no_output_____"
],
[
"* Thereafter we vary $\\alpha$ according to some minimization algorithm and return to the first step.\n\n\n\n\n## Quantum Monte Carlo Motivation\n**Basic steps.**\n\nChoose a trial wave function\n$\\psi_T(\\boldsymbol{R})$.",
"_____no_output_____"
],
[
"$$\nP(\\boldsymbol{R})= \\frac{\\left|\\psi_T(\\boldsymbol{R})\\right|^2}{\\int \\left|\\psi_T(\\boldsymbol{R})\\right|^2d\\boldsymbol{R}}.\n$$",
"_____no_output_____"
],
[
"This is our new probability distribution function (PDF).\nThe approximation to the expectation value of the Hamiltonian is now",
"_____no_output_____"
],
[
"$$\nE[H(\\boldsymbol{\\alpha})] = \n \\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R},\\boldsymbol{\\alpha})H(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R},\\boldsymbol{\\alpha})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R},\\boldsymbol{\\alpha})\\Psi_T(\\boldsymbol{R},\\boldsymbol{\\alpha})}.\n$$",
"_____no_output_____"
],
[
"## Quantum Monte Carlo Motivation\nDefine a new quantity",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"eq:locale1\"></div>\n\n$$\nE_L(\\boldsymbol{R},\\boldsymbol{\\alpha})=\\frac{1}{\\psi_T(\\boldsymbol{R},\\boldsymbol{\\alpha})}H\\psi_T(\\boldsymbol{R},\\boldsymbol{\\alpha}),\n\\label{eq:locale1} \\tag{1}\n$$",
"_____no_output_____"
],
[
"called the local energy, which, together with our trial PDF yields",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"eq:vmc1\"></div>\n\n$$\nE[H(\\boldsymbol{\\alpha})]=\\int P(\\boldsymbol{R})E_L(\\boldsymbol{R}) d\\boldsymbol{R}\\approx \\frac{1}{N}\\sum_{i=1}^N E_L(\\boldsymbol{R_i},\\boldsymbol{\\alpha})\n\\label{eq:vmc1} \\tag{2}\n$$",
"_____no_output_____"
],
[
"with $N$ being the number of Monte Carlo samples.\n\n\n\n\n\n\n\n\n## Quantum Monte Carlo\nThe Algorithm for performing a variational Monte Carlo calculations runs thus as this\n\n * Initialisation: Fix the number of Monte Carlo steps. Choose an initial $\\boldsymbol{R}$ and variational parameters $\\alpha$ and calculate $\\left|\\psi_T^{\\alpha}(\\boldsymbol{R})\\right|^2$. \n\n * Initialise the energy and the variance and start the Monte Carlo calculation.\n\n * Calculate a trial position $\\boldsymbol{R}_p=\\boldsymbol{R}+r*step$ where $r$ is a random variable $r \\in [0,1]$.\n\n * Metropolis algorithm to accept or reject this move $w = P(\\boldsymbol{R}_p)/P(\\boldsymbol{R})$.\n\n * If the step is accepted, then we set $\\boldsymbol{R}=\\boldsymbol{R}_p$. \n\n * Update averages\n\n\n * Finish and compute final averages.\n\nObserve that the jumping in space is governed by the variable *step*. This is Called brute-force sampling.\nNeed importance sampling to get more relevant sampling, see lectures below.\n\n\n\n## Quantum Monte Carlo: hydrogen atom\nThe radial Schroedinger equation for the hydrogen atom can be\nwritten as",
"_____no_output_____"
],
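[
"The accept/reject step is compact enough to state directly in code. Here is a minimal sketch of a single brute-force Metropolis move (an addition to these notes; the complete harmonic-oscillator implementation appears later):\n\n```python\nfrom random import random\n\ndef metropolis_step(R, prob, step):\n    # prob(R) is proportional to |psi_T(R)|^2; the normalization cancels in the ratio\n    R_p = R + step*(random() - 0.5)    # trial position\n    if random() <= prob(R_p)/prob(R):  # w = P(R_p)/P(R)\n        return R_p                     # accept the move\n    return R                           # reject, keep the old position\n```",
"_____no_output_____"
],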
[
"$$\n-\\frac{\\hbar^2}{2m}\\frac{\\partial^2 u(r)}{\\partial r^2}-\n\\left(\\frac{ke^2}{r}-\\frac{\\hbar^2l(l+1)}{2mr^2}\\right)u(r)=Eu(r),\n$$",
"_____no_output_____"
],
[
"or with dimensionless variables",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"eq:hydrodimless1\"></div>\n\n$$\n-\\frac{1}{2}\\frac{\\partial^2 u(\\rho)}{\\partial \\rho^2}-\n\\frac{u(\\rho)}{\\rho}+\\frac{l(l+1)}{2\\rho^2}u(\\rho)-\\lambda u(\\rho)=0,\n\\label{eq:hydrodimless1} \\tag{3}\n$$",
"_____no_output_____"
],
[
"with the hamiltonian",
"_____no_output_____"
],
[
"$$\nH=-\\frac{1}{2}\\frac{\\partial^2 }{\\partial \\rho^2}-\n\\frac{1}{\\rho}+\\frac{l(l+1)}{2\\rho^2}.\n$$",
"_____no_output_____"
],
[
"Use variational parameter $\\alpha$ in the trial\nwave function",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"eq:trialhydrogen\"></div>\n\n$$\nu_T^{\\alpha}(\\rho)=\\alpha\\rho e^{-\\alpha\\rho}. \n\\label{eq:trialhydrogen} \\tag{4}\n$$",
"_____no_output_____"
],
[
"## Quantum Monte Carlo: hydrogen atom\nInserting this wave function into the expression for the\nlocal energy $E_L$ gives",
"_____no_output_____"
],
[
"$$\nE_L(\\rho)=-\\frac{1}{\\rho}-\n \\frac{\\alpha}{2}\\left(\\alpha-\\frac{2}{\\rho}\\right).\n$$",
"_____no_output_____"
],
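[
"Before turning to the Monte Carlo results, note that this local energy can be integrated analytically (as shown later in these notes), giving $\\overline{E}[\\alpha]=\\alpha(\\alpha/2-1)$. A quick added sketch producing reference numbers for the table below:\n\n```python\nimport numpy as np\n\nalphas = np.array([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3])\nprint(alphas*(alphas/2 - 1))\n# [-0.455 -0.48  -0.495 -0.5   -0.495 -0.48  -0.455]\n```",
"_____no_output_____"
],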
[
"A simple variational Monte Carlo calculation results in\n<table border=\"1\">\n<thead>\n<tr><th align=\"center\"> $\\alpha$ </th> <th align=\"center\">$\\langle H \\rangle $</th> <th align=\"center\"> $\\sigma^2$</th> <th align=\"center\">$\\sigma/\\sqrt{N}$</th> </tr>\n</thead>\n<tbody>\n<tr><td align=\"center\"> 7.00000E-01 </td> <td align=\"center\"> -4.57759E-01 </td> <td align=\"center\"> 4.51201E-02 </td> <td align=\"center\"> 6.71715E-04 </td> </tr>\n<tr><td align=\"center\"> 8.00000E-01 </td> <td align=\"center\"> -4.81461E-01 </td> <td align=\"center\"> 3.05736E-02 </td> <td align=\"center\"> 5.52934E-04 </td> </tr>\n<tr><td align=\"center\"> 9.00000E-01 </td> <td align=\"center\"> -4.95899E-01 </td> <td align=\"center\"> 8.20497E-03 </td> <td align=\"center\"> 2.86443E-04 </td> </tr>\n<tr><td align=\"center\"> 1.00000E-00 </td> <td align=\"center\"> -5.00000E-01 </td> <td align=\"center\"> 0.00000E+00 </td> <td align=\"center\"> 0.00000E+00 </td> </tr>\n<tr><td align=\"center\"> 1.10000E+00 </td> <td align=\"center\"> -4.93738E-01 </td> <td align=\"center\"> 1.16989E-02 </td> <td align=\"center\"> 3.42036E-04 </td> </tr>\n<tr><td align=\"center\"> 1.20000E+00 </td> <td align=\"center\"> -4.75563E-01 </td> <td align=\"center\"> 8.85899E-02 </td> <td align=\"center\"> 9.41222E-04 </td> </tr>\n<tr><td align=\"center\"> 1.30000E+00 </td> <td align=\"center\"> -4.54341E-01 </td> <td align=\"center\"> 1.45171E-01 </td> <td align=\"center\"> 1.20487E-03 </td> </tr>\n</tbody>\n</table>\n\n\n\n\n## Quantum Monte Carlo: hydrogen atom\n\nWe note that at $\\alpha=1$ we obtain the exact\nresult, and the variance is zero, as it should. The reason is that \nwe then have the exact wave function, and the action of the hamiltionan\non the wave function",
"_____no_output_____"
],
[
"$$\nH\\psi = \\mathrm{constant}\\times \\psi,\n$$",
"_____no_output_____"
],
[
"yields just a constant. The integral which defines various \nexpectation values involving moments of the hamiltonian becomes then",
"_____no_output_____"
],
[
"$$\n\\langle H^n \\rangle =\n \\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})H^n(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}=\n\\mathrm{constant}\\times\\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}=\\mathrm{constant}.\n$$",
"_____no_output_____"
],
[
"**This gives an important information: the exact wave function leads to zero variance!**\nVariation is then performed by minimizing both the energy and the variance.\n\n\n\n\n\n## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)\n\nFor bosons in a harmonic oscillator-like trap we will use is a spherical (S)\n or an elliptical (E) harmonic trap in one, two and finally three\n dimensions, with the latter given by",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"trap_eqn\"></div>\n\n$$\n\\begin{equation}\n V_{ext}(\\mathbf{r}) = \\Bigg\\{\n \\begin{array}{ll}\n\t \\frac{1}{2}m\\omega_{ho}^2r^2 & (S)\\\\\n \\strut\n\t \\frac{1}{2}m[\\omega_{ho}^2(x^2+y^2) + \\omega_z^2z^2] & (E)\n\\label{trap_eqn} \\tag{5}\n \\end{array}\n \\end{equation}\n$$",
"_____no_output_____"
],
[
"where (S) stands for symmetric and",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"_auto1\"></div>\n\n$$\n\\begin{equation}\n \\hat{H} = \\sum_i^N \\left(\n\t \\frac{-\\hbar^2}{2m}\n\t { \\bigtriangledown }_{i}^2 +\n\t V_{ext}({\\bf{r}}_i)\\right) +\n\t \\sum_{i<j}^{N} V_{int}({\\bf{r}}_i,{\\bf{r}}_j),\n\\label{_auto1} \\tag{6}\n\\end{equation}\n$$",
"_____no_output_____"
],
[
"as the two-body Hamiltonian of the system.\n\n\n\n\n## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)\n We will represent the inter-boson interaction by a pairwise, repulsive potential",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"_auto2\"></div>\n\n$$\n\\begin{equation}\n V_{int}(|\\mathbf{r}_i-\\mathbf{r}_j|) = \\Bigg\\{\n \\begin{array}{ll}\n\t \\infty & {|\\mathbf{r}_i-\\mathbf{r}_j|} \\leq {a}\\\\\n\t 0 & {|\\mathbf{r}_i-\\mathbf{r}_j|} > {a}\n \\end{array}\n\\label{_auto2} \\tag{7}\n\\end{equation}\n$$",
"_____no_output_____"
],
[
"where $a$ is the so-called hard-core diameter of the bosons.\n Clearly, $V_{int}(|\\mathbf{r}_i-\\mathbf{r}_j|)$ is zero if the bosons are\n separated by a distance $|\\mathbf{r}_i-\\mathbf{r}_j|$ greater than $a$ but\n infinite if they attempt to come within a distance $|\\mathbf{r}_i-\\mathbf{r}_j| \\leq a$.\n\n\n\n## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)\n Our trial wave function for the ground state with $N$ atoms is given by",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"eq:trialwf\"></div>\n\n$$\n\\begin{equation}\n \\Psi_T(\\mathbf{R})=\\Psi_T(\\mathbf{r}_1, \\mathbf{r}_2, \\dots \\mathbf{r}_N,\\alpha,\\beta)=\\prod_i g(\\alpha,\\beta,\\mathbf{r}_i)\\prod_{i<j}f(a,|\\mathbf{r}_i-\\mathbf{r}_j|),\n\\label{eq:trialwf} \\tag{8}\n \\end{equation}\n$$",
"_____no_output_____"
],
[
"where $\\alpha$ and $\\beta$ are variational parameters. The\n single-particle wave function is proportional to the harmonic\n oscillator function for the ground state",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"_auto3\"></div>\n\n$$\n\\begin{equation}\n g(\\alpha,\\beta,\\mathbf{r}_i)= \\exp{[-\\alpha(x_i^2+y_i^2+\\beta z_i^2)]}.\n\\label{_auto3} \\tag{9}\n\\end{equation}\n$$",
"_____no_output_____"
],
[
"## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)\nFor spherical traps we have $\\beta = 1$ and for non-interacting\nbosons ($a=0$) we have $\\alpha = 1/2a_{ho}^2$. The correlation wave\n function is",
"_____no_output_____"
],
[
"<!-- Equation labels as ordinary links -->\n<div id=\"_auto4\"></div>\n\n$$\n\\begin{equation}\n f(a,|\\mathbf{r}_i-\\mathbf{r}_j|)=\\Bigg\\{\n \\begin{array}{ll}\n\t 0 & {|\\mathbf{r}_i-\\mathbf{r}_j|} \\leq {a}\\\\\n\t (1-\\frac{a}{|\\mathbf{r}_i-\\mathbf{r}_j|}) & {|\\mathbf{r}_i-\\mathbf{r}_j|} > {a}.\n \\end{array}\n\\label{_auto4} \\tag{10}\n\\end{equation}\n$$",
"_____no_output_____"
],
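[
"Equations (8)-(10) translate almost line by line into code. A minimal added sketch (the parameter names are chosen for illustration) of the boson trial wave function:\n\n```python\nimport numpy as np\n\ndef psi_T(r, alpha, beta, a):\n    # r is an (N, 3) array of particle positions\n    g = np.exp(-alpha*(r[:, 0]**2 + r[:, 1]**2 + beta*r[:, 2]**2)).prod()\n    f = 1.0\n    for i in range(len(r)):\n        for j in range(i + 1, len(r)):\n            rij = np.linalg.norm(r[i] - r[j])\n            f *= 0.0 if rij <= a else 1.0 - a/rij\n    return g*f\n```",
"_____no_output_____"
],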
[
"### Simple example, the hydrogen atom\n\nThe radial Schroedinger equation for the hydrogen atom can be\nwritten as (when we have gotten rid of the first derivative term in the kinetic energy and used $rR(r)=u(r)$)",
"_____no_output_____"
],
[
"$$\n-\\frac{\\hbar^2}{2m}\\frac{d^2 u(r)}{d r^2}-\n\\left(\\frac{ke^2}{r}-\\frac{\\hbar^2l(l+1)}{2mr^2}\\right)u(r)=Eu(r).\n$$",
"_____no_output_____"
],
[
"We will specialize to the case with $l=0$ and end up with",
"_____no_output_____"
],
[
"$$\n-\\frac{\\hbar^2}{2m}\\frac{d^2 u(r)}{d r^2}-\n\\left(\\frac{ke^2}{r}\\right)u(r)=Eu(r).\n$$",
"_____no_output_____"
],
[
"Then we introduce a dimensionless variable $\\rho=r/a$ where $a$ is a constant with dimension length.\nMultiplying with $ma^2/\\hbar^2$ we can rewrite our equations as",
"_____no_output_____"
],
[
"$$\n-\\frac{1}{2}\\frac{d^2 u(\\rho)}{d \\rho^2}-\n\\frac{ke^2ma}{\\hbar^2}\\frac{u(\\rho)}{\\rho}-\\lambda u(\\rho)=0.\n$$",
"_____no_output_____"
],
[
"Since $a$ is just a parameter we choose to set",
"_____no_output_____"
],
[
"$$\n\\frac{ke^2ma}{\\hbar^2}=1,\n$$",
"_____no_output_____"
],
[
"which leads to $a=\\hbar^2/mke^2$, better known as the Bohr radius with value $0.053$ nm. Scaling the equations this way does not only render our numerical treatment simpler since we avoid carrying with us all physical parameters, but we obtain also a **natural** length scale. We will see this again and again. In our discussions below with a harmonic oscillator trap, the **natural** lentgh scale with be determined by the oscillator frequency, the mass of the particle and $\\hbar$. We have also defined a dimensionless 'energy' $\\lambda = Ema^2/\\hbar^2$. \nWith the rescaled quantities, the ground state energy of the hydrogen atom is $1/2$. \nThe equation we want to solve is now defined by the Hamiltonian",
"_____no_output_____"
],
[
"$$\nH=-\\frac{1}{2}\\frac{d^2 }{d \\rho^2}-\\frac{1}{\\rho}.\n$$",
"_____no_output_____"
],
[
"As trial wave function we peep now into the analytical solution for\nthe hydrogen atom and use (with $\\alpha$ as a variational parameter)",
"_____no_output_____"
],
[
"$$\nu_T^{\\alpha}(\\rho)=\\alpha\\rho \\exp{-(\\alpha\\rho)}.\n$$",
"_____no_output_____"
],
[
"Inserting this wave function into the expression for the\nlocal energy $E_L$ gives",
"_____no_output_____"
],
[
"$$\nE_L(\\rho)=-\\frac{1}{\\rho}-\n \\frac{\\alpha}{2}\\left(\\alpha-\\frac{2}{\\rho}\\right).\n$$",
"_____no_output_____"
],
[
"To have analytical local energies saves us from computing numerically\nthe second derivative, a feature which often increases our numerical\nexpenditure with a factor of three or more. Integratng up the local energy (recall to bring back the PDF in the integration) gives $\\overline{E}[\\boldsymbol{\\alpha}]=\\alpha(\\alpha/2-1)$. \n\n\n\n\n### Second example, the harmonic oscillator in one dimension\n\nWe present here another well-known example, the harmonic oscillator in\none dimension for one particle. This will also serve the aim of\nintroducing our next model, namely that of interacting electrons in a\nharmonic oscillator trap.\n\nHere as well, we do have analytical solutions and the energy of the\nground state, with $\\hbar=1$, is $1/2\\omega$, with $\\omega$ being the\noscillator frequency. We use the following trial wave function",
"_____no_output_____"
],
[
"$$\n\\psi_T(x;\\alpha) = \\exp{-(\\frac{1}{2}\\alpha^2x^2)},\n$$",
"_____no_output_____"
],
[
"which results in a local energy",
"_____no_output_____"
],
[
"$$\n\\frac{1}{2}\\left(\\alpha^2+x^2(1-\\alpha^4)\\right).\n$$",
"_____no_output_____"
],
[
"We can compare our numerically calculated energies with the exact energy as function of $\\alpha$",
"_____no_output_____"
],
[
"$$\n\\overline{E}[\\alpha] = \\frac{1}{4}\\left(\\alpha^2+\\frac{1}{\\alpha^2}\\right).\n$$",
"_____no_output_____"
],
[
"Similarly, with the above ansatz, we can also compute the exact variance which reads",
"_____no_output_____"
],
[
"$$\n\\sigma^2[\\alpha]=\\frac{1}{4}\\left(1+(1-\\alpha^4)^2\\frac{3}{4\\alpha^4}\\right)-\\overline{E}.\n$$",
"_____no_output_____"
],
[
"Our code for computing the energy of the ground state of the harmonic oscillator follows here. We start by defining directories where we store various outputs.",
"_____no_output_____"
]
],
[
[
"# Common imports\nimport os\n\n# Where to save the figures and data files\nPROJECT_ROOT_DIR = \"Results\"\nFIGURE_ID = \"Results/FigureFiles\"\nDATA_ID = \"Results/VMCHarmonic\"\n\nif not os.path.exists(PROJECT_ROOT_DIR):\n os.mkdir(PROJECT_ROOT_DIR)\n\nif not os.path.exists(FIGURE_ID):\n os.makedirs(FIGURE_ID)\n\nif not os.path.exists(DATA_ID):\n os.makedirs(DATA_ID)\n\ndef image_path(fig_id):\n return os.path.join(FIGURE_ID, fig_id)\n\ndef data_path(dat_id):\n return os.path.join(DATA_ID, dat_id)\n\ndef save_fig(fig_id):\n plt.savefig(image_path(fig_id) + \".png\", format='png')\n\noutfile = open(data_path(\"VMCHarmonic.dat\"),'w')",
"_____no_output_____"
]
],
[
[
"We proceed with the implementation of the Monte Carlo algorithm but list first the ansatz for the wave function and the expression for the local energy",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\n# VMC for the one-dimensional harmonic oscillator\n# Brute force Metropolis, no importance sampling and no energy minimization\nfrom math import exp, sqrt\nfrom random import random, seed\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numba import jit\nfrom decimal import *\n# Trial wave function for the Harmonic oscillator in one dimension\ndef WaveFunction(r,alpha):\n return exp(-0.5*alpha*alpha*r*r)\n\n# Local energy for the Harmonic oscillator in one dimension\ndef LocalEnergy(r,alpha):\n return 0.5*r*r*(1-alpha**4) + 0.5*alpha*alpha",
"_____no_output_____"
]
],
[
[
"Note that in the Metropolis algorithm there is no need to compute the\ntrial wave function, mainly since we are just taking the ratio of two\nexponentials. It is then from a computational point view, more\nconvenient to compute the argument from the ratio and then calculate\nthe exponential. Here we have refrained from this purely of\npedagogical reasons.",
"_____no_output_____"
]
],
[
[
"# The Monte Carlo sampling with the Metropolis algo\n# The jit decorator tells Numba to compile this function.\n# The argument types will be inferred by Numba when the function is called.\n\ndef MonteCarloSampling():\n\n NumberMCcycles= 100000\n StepSize = 1.0\n # positions\n PositionOld = 0.0\n PositionNew = 0.0\n\n # seed for rng generator\n seed()\n # start variational parameter\n alpha = 0.4\n for ia in range(MaxVariations):\n alpha += .05\n AlphaValues[ia] = alpha\n energy = energy2 = 0.0\n #Initial position\n PositionOld = StepSize * (random() - .5)\n wfold = WaveFunction(PositionOld,alpha)\n #Loop over MC MCcycles\n for MCcycle in range(NumberMCcycles):\n #Trial position \n PositionNew = PositionOld + StepSize*(random() - .5)\n wfnew = WaveFunction(PositionNew,alpha)\n #Metropolis test to see whether we accept the move\n if random() <= wfnew**2 / wfold**2:\n PositionOld = PositionNew\n wfold = wfnew\n DeltaE = LocalEnergy(PositionOld,alpha)\n energy += DeltaE\n energy2 += DeltaE**2\n #We calculate mean, variance and error\n energy /= NumberMCcycles\n energy2 /= NumberMCcycles\n variance = energy2 - energy**2\n error = sqrt(variance/NumberMCcycles)\n Energies[ia] = energy \n Variances[ia] = variance \n outfile.write('%f %f %f %f \\n' %(alpha,energy,variance,error))\n return Energies, AlphaValues, Variances",
"_____no_output_____"
]
],
[
[
"Finally, the results are presented here with the exact energies and variances as well.",
"_____no_output_____"
]
],
[
[
"#Here starts the main program with variable declarations\nMaxVariations = 20\nEnergies = np.zeros((MaxVariations))\nExactEnergies = np.zeros((MaxVariations))\nExactVariance = np.zeros((MaxVariations))\nVariances = np.zeros((MaxVariations))\nAlphaValues = np.zeros(MaxVariations)\n(Energies, AlphaValues, Variances) = MonteCarloSampling()\noutfile.close()\nExactEnergies = 0.25*(AlphaValues*AlphaValues+1.0/(AlphaValues*AlphaValues))\nExactVariance = 0.25*(1.0+((1.0-AlphaValues**4)**2)*3.0/(4*(AlphaValues**4)))-ExactEnergies*ExactEnergies\n\n#simple subplot\nplt.subplot(2, 1, 1)\nplt.plot(AlphaValues, Energies, 'o-',AlphaValues, ExactEnergies,'r-')\nplt.title('Energy and variance')\nplt.ylabel('Dimensionless energy')\nplt.subplot(2, 1, 2)\nplt.plot(AlphaValues, Variances, '.-',AlphaValues, ExactVariance,'r-')\nplt.xlabel(r'$\\alpha$', fontsize=15)\nplt.ylabel('Variance')\nsave_fig(\"VMCHarmonic\")\nplt.show()\n#nice printout with Pandas\nimport pandas as pd\nfrom pandas import DataFrame\ndata ={'Alpha':AlphaValues, 'Energy':Energies,'Exact Energy':ExactEnergies,'Variance':Variances,'Exact Variance':ExactVariance,}\nframe = pd.DataFrame(data)\nprint(frame)",
"_____no_output_____"
]
],
[
[
"For $\\alpha=1$ we have the exact eigenpairs, as can be deduced from the\ntable here. With $\\omega=1$, the exact energy is $1/2$ a.u. with zero\nvariance, as it should. We see also that our computed variance follows rather well the exact variance.\nIncreasing the number of Monte Carlo cycles will improve our statistics (try to increase the number of Monte Carlo cycles).\n\nThe fact that the variance is exactly equal to zero when $\\alpha=1$ is that \nwe then have the exact wave function, and the action of the hamiltionan\non the wave function",
"_____no_output_____"
],
[
"$$\nH\\psi = \\mathrm{constant}\\times \\psi,\n$$",
"_____no_output_____"
],
[
"yields just a constant. The integral which defines various \nexpectation values involving moments of the hamiltonian becomes then",
"_____no_output_____"
],
[
"$$\n\\langle H^n \\rangle =\n \\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})H^n(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}=\n\\mathrm{constant}\\times\\frac{\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}\n {\\int d\\boldsymbol{R}\\Psi^{\\ast}_T(\\boldsymbol{R})\\Psi_T(\\boldsymbol{R})}=\\mathrm{constant}.\n$$",
"_____no_output_____"
],
[
"**This gives an important information: the exact wave function leads to zero variance!**\nAs we will see below, many practitioners perform a minimization on both the energy and the variance.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0d388621fe855459af9ae7562a113dfe5a937cf | 5,009 | ipynb | Jupyter Notebook | examples/BCI Competition 3, Data Set 1 (motor imagery in ECoG recordings).ipynb | jscastanoc/wyrm | c3593efe3cb5507ac525be4d650df7ce504aab00 | [
"MIT"
] | null | null | null | examples/BCI Competition 3, Data Set 1 (motor imagery in ECoG recordings).ipynb | jscastanoc/wyrm | c3593efe3cb5507ac525be4d650df7ce504aab00 | [
"MIT"
] | null | null | null | examples/BCI Competition 3, Data Set 1 (motor imagery in ECoG recordings).ipynb | jscastanoc/wyrm | c3593efe3cb5507ac525be4d650df7ce504aab00 | [
"MIT"
] | null | null | null | 27.075676 | 214 | 0.478139 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0d38b88f0db915dc15c938e4ecc3866087e3bb2 | 175,185 | ipynb | Jupyter Notebook | confidence_intervals/confidence_intervals.ipynb | aleksander-ivanov/pyda_homeworks | aea53b673f51cfd9752dafb4be46d844ff593bb0 | [
"MIT"
] | null | null | null | confidence_intervals/confidence_intervals.ipynb | aleksander-ivanov/pyda_homeworks | aea53b673f51cfd9752dafb4be46d844ff593bb0 | [
"MIT"
] | null | null | null | confidence_intervals/confidence_intervals.ipynb | aleksander-ivanov/pyda_homeworks | aea53b673f51cfd9752dafb4be46d844ff593bb0 | [
"MIT"
] | null | null | null | 330.537736 | 41,923 | 0.658618 | [
[
[
"# Домашнее задание «Доверительные интервалы. Статистическая проверка гипотез для несвязанных выборок»",
"_____no_output_____"
]
],
[
[
"import scipy.stats as stats\nimport pandas as pd\nimport numpy as np\nimport scipy as sp\n",
"_____no_output_____"
]
],
[
[
"1. Найдите минимально необходимый объем выборки для построения интервальной оценки среднего с точностью ∆ = 3, дисперсией (stddev) σ^2 = 225 и уровнем доверия β = 0.95.",
"_____no_output_____"
]
],
[
[
"# β = 1 − α # формула уровня доверия, те вероятность принять неправильную нулевую гипотезу\n# α = 1 − β # уровень значимости (обычно 5%), те вероятность отвергнуть правильную нулевую гипотезу (вероятность ошибки первого рода).\n# p-value - минимальное значение уровня значимости, при котором мы отвергаем нулевую гипотезу\n# если p-value < α - то результат является статистически значимым и можно отклонить нулевую гипотезу\n\nZ = 1.96 # 95%\nsigma = 15 # стандартное отклонение\ne = 3\n\nN = ((sigma*Z)/e)**2\nN",
"_____no_output_____"
]
],
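[
[
"# Added note (a sketch, not part of the original homework): a required sample\n# size must be rounded up to guarantee the stated precision, so N = 97 here.\nimport math\nmath.ceil(N)",
"_____no_output_____"
]
],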
[
[
"2. Вам даны две выборки роста мужчин и женщин. Докажите, используя t-Тест Стьдента, что различия между выборками незначительно, если уровень значимости равен 0.001\n",
"_____no_output_____"
]
],
[
[
"np.random.seed(12)\npopulation_men = stats.norm.rvs(loc=19,scale=171,size=11000000) # Выборка мужчин со средним ростом 171\npopulation_women = stats.norm.rvs(loc=16,scale=165,size=12000) # Выборка женщин со средним ростом 165",
"_____no_output_____"
],
[
"t , p = stats.ttest_ind(population_men,population_women)\nprint(\"t = \" + str(t))\nprint(\"p = \" + str(p))",
"t = 4.454139905689355\np = 8.423099227697255e-06\n"
],
[
"pd.DataFrame(population_men).hist()",
"_____no_output_____"
],
[
"pd.DataFrame(population_women).hist()",
"_____no_output_____"
]
],
[
[
"3. Определите объем необходимой выборки для исследования среднего чека за кофе в случайном городе, если известно, что в этом городе стандартное отклонение = 150, уровень доверия = 95%. Погрешность 50 рублей. ",
"_____no_output_____"
]
],
[
[
"sigma = 150\nZ = 1.96 # 95%\ne = 3",
"_____no_output_____"
],
[
"N = ((sigma*Z)/e)**2\nN",
"_____no_output_____"
]
],
[
[
"4. Представьте, что вы хотите разоблачить \"волшебника\", который считает, что умеет предсказывать погоду на завтра. Отвечая просто: дождь или солнце. Вы пронаблюдали за ответами \"волшебника\" в течении какого периода времени и получили такие результаты (см.ниже). Можно ли сказать, что маг действительно умеет предсказывать погоду, \nесли уровнь значимости принять за 0.05 ?",
"_____no_output_____"
],
[
" - Нулевая гипотеза - волшебник умеет предсказывать погоду.\n - Альтернативная - неумеет",
"_____no_output_____"
]
],
[
[
"observations = pd.DataFrame([[25,36],[15,44]],\n index=['Дождь','Солнце'],\n columns=['Ответ волшебника','Реальность'])\nobservations",
"_____no_output_____"
],
[
"oddsratio, pvalue = sp.stats.fisher_exact(observations) \npvalue",
"_____no_output_____"
]
],
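[
[
"# Added cross-check (a sketch, not part of the original homework): the\n# chi-square test of independence is the usual large-sample alternative to\n# Fisher's exact test for a 2x2 contingency table.\nchi2, p_chi2, dof, expected = sp.stats.chi2_contingency(observations)\np_chi2",
"_____no_output_____"
]
],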
[
[
"Вывод: значение pvalue больше уровня значимости, это значит мы не может отклонить нулевую гипотезу тк результат статистически не значим и мы не можем утверждать что волшебник действительно может предсказывать погоду.",
"_____no_output_____"
],
[
"5. Используя функцию mean_confidence_interval(data, confidence), постройте доверительный интервал с уровнем доверия 90% для выборки: data = [1,5,8,9,6,7,5,6,7,8,5,6,7,0,9,8,4,6,7,9,8,6,5,7,8,9,6,7,5,8,6,7,9,5]",
"_____no_output_____"
]
],
[
[
"def mean_confidence_interval(data, confidence=0.95):\n n = len(data)\n m, se = np.mean(data), stats.sem(data)\n h = se * stats.t.ppf((1 + confidence)/2, n)\n return m-h,m, m+h#,h",
"_____no_output_____"
],
[
"data = [4,5,8,9,6,7,5,6,7,8,5,6,7,0,9,8,4,6,7,9,8,6,5,7,8,9,6,7,5,8,6,7,9,5,10]",
"_____no_output_____"
],
[
"mean_confidence_interval(data, 0.90)",
"_____no_output_____"
]
],
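[
[
"# Added cross-check (a sketch, not part of the original homework): scipy's\n# built-in Student-t interval should agree with the function above once the\n# same n-1 degrees of freedom are used.\nm, se = np.mean(data), stats.sem(data)\nstats.t.interval(0.90, len(data)-1, loc=m, scale=se)",
"_____no_output_____"
]
],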
[
[
"6. Принадлежит ли выборка data_1 и data_2 одному множеству? Оцените это с помощью известных вам тестов проверки гипотез.",
"_____no_output_____"
]
],
[
[
"data_1 = [4,5,8,9,6,7,5,6,7,8,5,6,7,0,9,8,4,6,7,9,8,6,5,7,8,9,6,7,5,8,6,7,9,5,10]\ndata_2 = [8,5,6,7,0,1,8,4,6,7,0,2,6,5,7,5,3,5,3,5,3,5,5,8,7,6,4,5,3,5,4,6,4,5,3,2,6,4,2,6,1,0,4,3,5,4,3,4,5,4,3,4,5,4,3,4,5,3,4,4,1,2,4,3,1,2,4,3,2,1,5,3,4,6,4,5,3,2,4,5,6,4,3,1,3,5,3,4,4,4,2,5,3]",
"_____no_output_____"
],
[
"stats.ttest_ind(data_1,data_2)",
"_____no_output_____"
]
],
[
[
"По t-тесту Стьюдента мы получили достаточно высокое значение pvalue (> 0.05), соответственно отвергаем нулевую гипотезу, что означает две данные выборки не принадлежат одному множеству.",
"_____no_output_____"
],
[
"7. На примере датасета про жилье в New York City, мы сталкивались с примером, когда переменная имеет не совсем нормальное распределение. \n\nПредположим, Вы сформировали две гипотезы:\nНулевая гипотеза - распределение нормальное, \nАльтернативная гипотеза - распределение не нормальное.\n\n\nДопустим, вы применили какой-то тест (сейчас неважно какой), который показал уровень значимости (p-value) = 0.03.\nКаковы будут ваши выводы? Будем считать что у нас нормальное распределение или все-таки нет? Вопрос без подвоха)",
"_____no_output_____"
],
[
"Вывод: При pvalue = 0.03, что меньше уровня значимости, будем считать, что результат статистически значим и можно отклонить нулевую гипотезу и переменная имеет не нормальное распределение.",
"_____no_output_____"
],
[
"8. Первая выборка — это пациенты, которых лечили препаратом А. \nВторая выборка — пациенты, которых лечили препаратом Б. Значения в выборках — это некоторая характеристика эффективности лечения (уровень метаболита в крови, температура через три дня после начала лечения, срок выздоровления, число койко-дней, и т.д.) \n\nа) Требуется выяснить, имеется ли значимое различие эффективности препаратов А и Б, или различия являются чисто случайными и объясняются «естественной» дисперсией выбранной характеристики? (уровень значимости принять за 5% или 0.05)\n\nb) При каком минимальном P-values различия были бы уже значимы? ",
"_____no_output_____"
]
],
[
[
"np.random.seed(11)\nA = stats.norm.rvs(scale=50,loc=10,size=300)\nB = A+stats.norm.rvs(scale=10,loc=-1.25,size=300)\n",
"_____no_output_____"
],
[
"pd.DataFrame(A).hist(bins=120)",
"_____no_output_____"
],
[
"pd.DataFrame(B).hist(bins=120)",
"_____no_output_____"
],
[
"stats.ttest_ind(a=A,b=B,equal_var=False)",
"_____no_output_____"
]
],
[
[
"a) Значение pvalue выше уровня значимости - мы не можем отклонить нулевую гипотезу, различия статистически не значимы либо случайны\n",
"_____no_output_____"
],
[
"b) При любом значении pvalue меньше 0.05 различия считались бы статистически значимыми",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
d0d39160d8add36023ff511c4b4d4e2928ca3b87 | 52,703 | ipynb | Jupyter Notebook | notebooks/test.ipynb | saty101/Captcha-Text-Recognition | 08704a451bf11ea4f6ebc72978386d71c3d4f212 | [
"MIT"
] | null | null | null | notebooks/test.ipynb | saty101/Captcha-Text-Recognition | 08704a451bf11ea4f6ebc72978386d71c3d4f212 | [
"MIT"
] | null | null | null | notebooks/test.ipynb | saty101/Captcha-Text-Recognition | 08704a451bf11ea4f6ebc72978386d71c3d4f212 | [
"MIT"
] | null | null | null | 52,703 | 52,703 | 0.93266 | [
[
[
"Testing our model on validation dataset which is taken as test_dataset",
"_____no_output_____"
]
],
[
[
"import torch\nimport albumentations\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.nn import functional as F\nfrom PIL import Image, ImageFile",
"_____no_output_____"
],
[
"import sys\nsys.path.append('../src')\nfrom dataset import ClassificationDataset\nfrom model import CaptchaModel",
"_____no_output_____"
],
[
"import glob\nimport os\nfrom torch import nn\nfrom sklearn import preprocessing, model_selection\nimage_files = glob.glob(os.path.join('../src/captcha_images_v2/', \"*.png\"))",
"_____no_output_____"
],
[
"targets_orig = [x.split(\"/\")[-1][:-4] for x in image_files]\ntargets = [[c for c in x] for x in targets_orig]\ntargets_flat = [c for clist in targets for c in clist]",
"_____no_output_____"
],
[
"lbl_enc = preprocessing.LabelEncoder()\nlbl_enc.fit(targets_flat)\ntargets_enc = [lbl_enc.transform(x) for x in targets]\ntargets_enc = np.array(targets_enc)\ntargets_enc = targets_enc+1",
"_____no_output_____"
],
[
"(\n _,\n test_imgs,\n _,\n test_targets,\n _,\n test_targets_orig,\n) = model_selection.train_test_split(\n image_files, targets_enc, targets_orig, test_size=0.1, random_state=42\n)",
"_____no_output_____"
],
[
"test_dataset = ClassificationDataset(\n image_paths = test_imgs,\n targets=test_targets,\n resize=(75,300)\n)",
"_____no_output_____"
],
[
"test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size = 8,\n num_workers = 8,\n shuffle= False\n)",
"_____no_output_____"
],
[
"checkpoint = torch.load('../notebooks/crnn200.pt')\nmodel.load_state_dict(checkpoint['state_dict'])\nmodel.eval()",
"_____no_output_____"
],
[
"model = CaptchaModel(num_chars=len((lbl_enc.classes_)))\nmodel.to('cuda')",
"_____no_output_____"
],
[
"tmp = iter(test_loader)\nbatch = next(tmp)\ntemp1 = batch[\"images\"][0]\ntemp2 = batch['targets'][0]\ntemp1.shape, temp2.shape",
"_____no_output_____"
],
[
"plt.imshow(temp1[0])",
"_____no_output_____"
],
[
"temp1 = temp1.cuda()\ntemp1 = torch.unsqueeze(temp1, 0)\ntemp1.shape",
"_____no_output_____"
],
[
"x, y = model(temp1)",
"_____no_output_____"
]
],
[
[
"Prediction of the model",
"_____no_output_____"
]
],
[
[
"from train import decode_predictions, remove_duplicates\npreds = decode_predictions(x, lbl_enc)\npreds",
"_____no_output_____"
]
],
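[
[
"# Added sketch: remove_duplicates is imported from train.py above but not used\n# in this notebook. If decode_predictions does not already collapse repeated\n# CTC symbols, a post-processing pass might look like this (its exact\n# signature is assumed here):\n[remove_duplicates(p) for p in preds]",
"_____no_output_____"
]
],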
[
[
"The original Label of the Captcha",
"_____no_output_____"
]
],
[
[
"for t in temp2:\n print(lbl_enc.classes_[t-1],end='')",
"44c22"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d39f2cf1e70071fe6affa438c59baa82e7a498 | 1,105 | ipynb | Jupyter Notebook | ETL/Extract/Encoding.ipynb | Vishal7017/Data_Pipelines | 300f37c845ca30c589d8328e76b9d90c6952bbbd | [
"BSD-3-Clause"
] | 1 | 2021-06-28T17:43:25.000Z | 2021-06-28T17:43:25.000Z | ETL/Extract/Encoding.ipynb | Vishal7017/Data_Pipelines | 300f37c845ca30c589d8328e76b9d90c6952bbbd | [
"BSD-3-Clause"
] | null | null | null | ETL/Extract/Encoding.ipynb | Vishal7017/Data_Pipelines | 300f37c845ca30c589d8328e76b9d90c6952bbbd | [
"BSD-3-Clause"
] | null | null | null | 19.385965 | 67 | 0.506787 | [
[
[
"import chardet \n\n# use the detect method to find the encoding\n# 'rb' means read in the file as binary\nwith open(\"mystery.csv\", 'rb') as file:\n print(chardet.detect(file.read()))",
"{'encoding': 'UTF-16', 'confidence': 1.0, 'language': ''}\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
d0d3adceae1fb3375d66b0abb3c42cfc02aa8aba | 13,287 | ipynb | Jupyter Notebook | functional_tests/mongodb_test_run.ipynb | chung-ejy/longshot | c9ff1a6586546b496ed6ba2f0b43ff9cd68fa15d | [
"MIT"
] | null | null | null | functional_tests/mongodb_test_run.ipynb | chung-ejy/longshot | c9ff1a6586546b496ed6ba2f0b43ff9cd68fa15d | [
"MIT"
] | null | null | null | functional_tests/mongodb_test_run.ipynb | chung-ejy/longshot | c9ff1a6586546b496ed6ba2f0b43ff9cd68fa15d | [
"MIT"
] | 1 | 2021-08-05T02:12:51.000Z | 2021-08-05T02:12:51.000Z | 156.317647 | 1,838 | 0.68789 | [
[
[
"import pandas as pd\nfrom database.market import Market",
"_____no_output_____"
],
[
"market = Market()",
"_____no_output_____"
],
[
"market.connect()\ntest = market.retrieve(\"prices\")\nmarket.disconnect()",
"_____no_output_____"
],
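[
"# Added sanity check (a sketch): retrieve() presumably returns a pandas\n# DataFrame here (pandas is imported above), so peek at the first rows of\n# the round trip.\ntest.head()",
"_____no_output_____"
]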
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
d0d3b7a3d66de1d4c760a200bb18400b54fb0db5 | 11,332 | ipynb | Jupyter Notebook | 07_Elementary_Topic_modelling_NLP.ipynb | ShaifaliBhardwaj01/Elementary-Topic-Modelling | 2738692ec75e7478700b540d4f3da3476d150bb5 | [
"Apache-2.0"
] | null | null | null | 07_Elementary_Topic_modelling_NLP.ipynb | ShaifaliBhardwaj01/Elementary-Topic-Modelling | 2738692ec75e7478700b540d4f3da3476d150bb5 | [
"Apache-2.0"
] | null | null | null | 07_Elementary_Topic_modelling_NLP.ipynb | ShaifaliBhardwaj01/Elementary-Topic-Modelling | 2738692ec75e7478700b540d4f3da3476d150bb5 | [
"Apache-2.0"
] | null | null | null | 55.54902 | 876 | 0.636869 | [
[
[
"from nltk.corpus import stopwords \nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport string\nimport gensim\nfrom gensim import corpora\n",
"_____no_output_____"
],
[
"import nltk\nnltk.download('stopwords')\nnltk.download('wordnet')",
"[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n"
],
[
"from nltk.corpus import stopwords \nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport string\nimport gensim\nfrom gensim import corpora\n# Step 2: Getting data\ndocs1=\"Sugar causes blood glucose to spike and plummet. Unstable blood sugar often leads to mood swings, fatigue, headaches and cravings for more sugar. Cravings set the stage for a cycle of addiction in which every new hit of sugar makes you feel better temporarily but, a few hours later, results in more cravings and hunger. On the flip side, those who avoid sugar often report having little or no cravings for sugary things and feeling emotionally balanced and energized.\"\ndocs2=\"Sugar increases the risk of obesity, diabetes and heart disease. Large-scale studies have shown that the more high-glycemic foods (those that quickly affect blood sugar), including foods containing sugar, a person consumes, the higher his risk for becoming obese and for developing diabetes and heart disease1. Emerging research is also suggesting connections between high-glycemic diets and many different forms of cancer.\"\ndocs3=\"Sugar interferes with immune function. Research on human subjects is scant, but animal studies have shown that sugar suppresses immune response5. More research is needed to understand the exact mechanisms; however, we do know that bacteria and yeast feed on sugar and that, when these organisms get out of balance in the body, infections and illness are more likely.\"\ndocs4=\"A high-sugar diet often results in chromium deficiency. Its sort of a catch-22. If you consume a lot of sugar and other refined carbohydrates, you probably dont get enough of the trace mineral chromium, and one of chromiums main functions is to help regulate blood sugar. Scientists estimate that 90 percent of Americans dont get enough chromium. Chromium is found in a variety of animal foods, seafood and plant foods. Refining starches and other carbohydrates rob these foods of their chromium supplies.\"\ndocs5=\"Sugar accelerates aging. It even contributes to that telltale sign of aging: sagging skin. Some of the sugar you consume, after hitting your bloodstream, ends up attaching itself to proteins, in a process called glycation. These new molecular structures contribute to the loss of elasticity found in aging body tissues, from your skin to your organs and arteries7. The more sugar circulating in your blood, the faster this damage takes hold.\"\ndocs6=\"Sugar causes tooth decay. With all the other life-threatening effects of sugar, we sometimes forget the most basic damage it does. When it sits on your teeth, it creates decay more efficiently than any other food substance8. For a strong visual reminder, next time the Tooth Fairy visits, try the old tooth-in-a-glass-of-Coke experiment—the results will surely convince you that sugar isnt good for your pearly whites.\"\ndocs7=\"Sugar can cause gum disease, which can lead to heart disease. Increasing evidence shows that chronic infections, such as those that result from periodontal problems, play a role in the development of coronary artery disease9. The most popular theory is that the connection is related to widespread effects from the bodys inflammatory response to infection.\"\ndocs7=\"Sugar affects behavior and cognition in children. Though it has been confirmed by millions of parents, most researchers have not been able to show the effect of sugar on childrens behavior. 
A possible problem with the research is that most of it compared the effects of a sugar-sweetened drink to one containing an artificial sweetener10. It may be that kids react to both real sugar and sugar substitutes, therefore showing no differences in behavior. What about kids ability to learn? Between 1979 and 1983, 803 New York City public schools reduced the amount of sucrose (table sugar) and eliminated artificial colors, flavors and two preservatives from school lunches and breakfasts. The diet policy changes were followed by a 15.7 percent increase in a national academic ranking (previously, the greatest improvement ever seen had been 1.7 percent).\"\ndocs8=\"Sugar increases stress. When were under stress, our stress hormone levels rise; these chemicals are the bodys fight-or-flight emergency crew, sent out to prepare the body for an attack or an escape. These chemicals are also called into action when blood sugar is low. For example, after a blood-sugar spike (say, from eating a piece of birthday cake), theres a compensatory dive, which causes the body to release stress hormones such as adrenaline, epinephrine and cortisol. One of the main things these hormones do is raise blood sugar, providing the body with a quick energy boost. The problem is, these helpful hormones can make us feel anxious, irritable and shaky.\"\ndocs9=\"Sugar takes the place of important nutrients. According to USDA data, people who consume the most sugar have the lowest intakes of essential nutrients––especially vitamin A, vitamin C, folate, vitamin B-12, calcium, phosphorous, magnesium and iron. Ironically, those who consume the most sugar are children and teenagers, the individuals who need these nutrients most12.\"\ndocs10=\"Slashing Sugar. Now that you know the negative impacts refined sugar can have on your body and mind, youll want to be more careful about the foods you choose. And the first step is getting educated about where sugar lurks—believe it or not, a food neednt even taste all that sweet for it to be loaded with sugar. When it comes to convenience and packaged foods, let the ingredients label be your guide, and be aware that just because something boasts that it is low in carbs or a diet food, doesnt mean its free of sugar. Atkins products never contain added sugar.\"\n# compile documents\ndoc_complete=[docs1,docs2,docs3, docs4,docs5,docs6,docs7,docs8,docs9,docs10]\n",
"_____no_output_____"
],
[
"# Step - some necessary preprocessing",
"_____no_output_____"
],
[
"stop_set = set(stopwords.words('english'))\nexclude_set = set(string.punctuation) \nlemmatize = WordNetLemmatizer()\ndef clean_doc(doc):\n stop_free = \" \".join([i for i in doc.lower().split() if i not in stop_set])\n punc_free = ''.join(i for i in stop_free if i not in exclude_set)\n normalized = \" \".join(lemmatize.lemmatize(w) for w in punc_free.split())\n return normalized\ncleaned = [clean_doc(doc).split() for doc in doc_complete]\n",
"_____no_output_____"
],
[
"# Step 4: Create LDA model using gensim",
"_____no_output_____"
],
[
"# Every unique term is assigned an index in our term document matrix. \ndictionary = corpora.Dictionary(cleaned)\n# Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.\ndoc_term_matrix = [dictionary.doc2bow(doc) for doc in cleaned]\n# Creating an LDA object\nLda = gensim.models.ldamodel.LdaModel\n# Running and Training LDA model on the document term matrix.\nldamodel = Lda(doc_term_matrix, num_topics=5, id2word = dictionary, passes=300)\n#Result\ntopics = ldamodel.print_topics(num_topics=5, num_words=5)\nfor i in topics :\n print (i)\n#prints \n",
"(0, '0.058*\"sugar\" + 0.022*\"craving\" + 0.016*\"blood\" + 0.016*\"aging\" + 0.011*\"research\"')\n(1, '0.051*\"chromium\" + 0.026*\"food\" + 0.018*\"get\" + 0.018*\"carbohydrate\" + 0.018*\"enough\"')\n(2, '0.060*\"sugar\" + 0.035*\"food\" + 0.011*\"diet\" + 0.011*\"highglycemic\" + 0.011*\"tooth\"')\n(3, '0.003*\"blood\" + 0.003*\"consume\" + 0.003*\"research\" + 0.003*\"cause\" + 0.003*\"sugar\"')\n(4, '0.045*\"sugar\" + 0.017*\"body\" + 0.017*\"stress\" + 0.017*\"hormone\" + 0.013*\"behavior\"')\n"
],
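[
"# Added follow-up (a sketch, not in the original notebook): score an unseen\n# document with the trained model. get_document_topics is a standard gensim\n# LdaModel method; the sample sentence is made up for illustration.\nnew_doc = clean_doc(\"Cutting sugar intake may stabilize blood glucose and mood.\").split()\nprint(ldamodel.get_document_topics(dictionary.doc2bow(new_doc)))",
"_____no_output_____"
],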
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d3bfc2417bc9ef4d5c007e927bf13323918c46 | 5,559 | ipynb | Jupyter Notebook | scalar-bars.ipynb | UttamBasu/ipygany_examples | 5b619951977a0e89f2c63a172d55a637519231d8 | [
"MIT"
] | null | null | null | scalar-bars.ipynb | UttamBasu/ipygany_examples | 5b619951977a0e89f2c63a172d55a637519231d8 | [
"MIT"
] | null | null | null | scalar-bars.ipynb | UttamBasu/ipygany_examples | 5b619951977a0e89f2c63a172d55a637519231d8 | [
"MIT"
] | null | null | null | 34.314815 | 419 | 0.569167 | [
[
[
"%matplotlib inline\nfrom pyvista import set_plot_theme\nset_plot_theme('document')",
"_____no_output_____"
]
],
[
[
"Customize Scalar Bars {#scalar_bar_example}\n=====================\n\nWalk through of all the different capabilities of scalar bars and how a\nuser can customize scalar bars.\n",
"_____no_output_____"
]
],
[
[
"# sphinx_gallery_thumbnail_number = 2\nimport pyvista as pv\nfrom pyvista import examples",
"_____no_output_____"
]
],
[
[
"By default, when plotting a dataset with a scalar array, a scalar bar\nfor that array is added. To turn off this behavior, a user could specify\n`show_scalar_bar=False` when calling `.add_mesh()`. Let\\'s start with a\nsample dataset provide via PyVista to demonstrate the default behavior\nof scalar bar plotting:\n",
"_____no_output_____"
]
],
[
[
"# Load St Helens DEM and warp the topography\nmesh = examples.download_st_helens().warp_by_scalar()\n\n# First a default plot with jet colormap\np = pv.Plotter()\n# Add the data, use active scalar for coloring, and show the scalar bar\np.add_mesh(mesh)\n# Display the scene\np.show()",
"_____no_output_____"
]
],
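[
[
"As mentioned above, the automatically added scalar bar can be suppressed by\npassing `show_scalar_bar=False` to `add_mesh`. Here is a minimal sketch on the\nsame mesh (no new objects are assumed):\n",
"_____no_output_____"
]
],
[
[
"p = pv.Plotter()\n# Suppress the default scalar bar for this mesh\np.add_mesh(mesh, show_scalar_bar=False)\np.show()",
"_____no_output_____"
]
],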
[
[
"We could also plot the scene with an interactive scalar bar to move\naround and place where we like by specifying passing keyword arguments\nto control the scalar bar via the `scalar_bar_args` parameter in\n`pyvista.BasePlotter.add_mesh`{.interpreted-text role=\"func\"}. The\nkeyword arguments to control the scalar bar are defined in\n`pyvista.BasePlotter.add_scalar_bar`{.interpreted-text role=\"func\"}.\n",
"_____no_output_____"
]
],
[
[
"# create dictionary of parameters to control scalar bar\nsargs = dict(interactive=True) # Simply make the bar interactive\n\np = pv.Plotter(notebook=False) # If in IPython, be sure to show the scene\np.add_mesh(mesh, scalar_bar_args=sargs)\np.show()\n# Remove from plotters so output is not produced in docs\npv.plotting._ALL_PLOTTERS.clear()",
"_____no_output_____"
]
],
[
[
"\n\nOr manually define the scalar bar\\'s location:\n",
"_____no_output_____"
]
],
[
[
"# Set a custom position and size\nsargs = dict(height=0.25, vertical=True, position_x=0.05, position_y=0.05)\n\np = pv.Plotter()\np.add_mesh(mesh, scalar_bar_args=sargs)\np.show()",
"_____no_output_____"
]
],
[
[
"The text properties of the scalar bar can also be controlled:\n",
"_____no_output_____"
]
],
[
[
"# Controlling the text properties\nsargs = dict(\n title_font_size=20,\n label_font_size=16,\n shadow=True,\n n_labels=3,\n italic=True,\n fmt=\"%.1f\",\n font_family=\"arial\",\n)\n\np = pv.Plotter()\np.add_mesh(mesh, scalar_bar_args=sargs)\np.show()",
"_____no_output_____"
]
],
[
[
"Labelling values outside of the scalar range\n",
"_____no_output_____"
]
],
[
[
"p = pv.Plotter()\np.add_mesh(mesh, clim=[1000, 2000],\n below_color='blue', above_color='red',\n scalar_bar_args=sargs)\np.show()",
"_____no_output_____"
]
],
[
[
"Annotate values of interest using a dictionary. The key of the\ndictionary must be the value to annotate, and the value must be the\nstring label.\n",
"_____no_output_____"
]
],
[
[
"# Make a dictionary for the annotations\nannotations = {\n 2300: \"High\",\n 805.3: \"Cutoff value\",\n}\n\np = pv.Plotter()\np.add_mesh(mesh, scalars='Elevation', annotations=annotations)\np.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3c2a633c21d71aa0f5fc505964a1dfde1bf4c | 721,993 | ipynb | Jupyter Notebook | uk-used-car-data-set/eda-model.ipynb | tejeshreddy/kaggle-challenges | a1a58a267b7afb9b1f01a9ec2d9daab2a1e1e167 | [
"MIT"
] | 1 | 2020-12-10T13:37:02.000Z | 2020-12-10T13:37:02.000Z | uk-used-car-data-set/eda-model.ipynb | tejeshreddy/kaggle-challenges | a1a58a267b7afb9b1f01a9ec2d9daab2a1e1e167 | [
"MIT"
] | null | null | null | uk-used-car-data-set/eda-model.ipynb | tejeshreddy/kaggle-challenges | a1a58a267b7afb9b1f01a9ec2d9daab2a1e1e167 | [
"MIT"
] | null | null | null | 494.177276 | 372,808 | 0.936866 | [
[
[
"## Step 1: Import Libraries",
"_____no_output_____"
]
],
[
[
"# All imports\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport missingno\n\nimport seaborn as sns\n\nfrom sklearn.feature_selection import SelectKBest, f_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.svm import SVR\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"# List all the files\nfor dir_name, _, file_names in os.walk('data'):\n for file_name in file_names:\n print(os.path.join(dir_name, file_name))",
"data/vauxhall.csv\ndata/bmw.csv\ndata/unclean cclass.csv\ndata/vw.csv\ndata/hyundi.csv\ndata/audi.csv\ndata/unclean focus.csv\ndata/toyota.csv\ndata/ford.csv\ndata/focus.csv\ndata/skoda.csv\ndata/cclass.csv\ndata/merc.csv\n"
]
],
[
[
"## Step 2: Reading the Data",
"_____no_output_____"
]
],
[
[
"data_vw = pd.read_csv(\"data/vw.csv\")",
"_____no_output_____"
],
[
"data_vw.shape",
"_____no_output_____"
],
[
"data_vw.head()",
"_____no_output_____"
],
[
"data_vw.describe()",
"_____no_output_____"
],
[
"missingno.matrix(data_vw)",
"_____no_output_____"
],
[
"data_vw.isnull().sum()",
"_____no_output_____"
]
],
[
[
"## Step 3: EDA",
"_____no_output_____"
]
],
[
[
"categorical_features = [feature for feature in data_vw.columns if data_vw[feature].dtype == 'O']",
"_____no_output_____"
],
[
"# Getting the count plot\nfor feature in categorical_features:\n sns.countplot(y=data_vw[feature])\n plt.show()",
"_____no_output_____"
],
[
"# Getting the barplot\nplt.figure(figsize=(10,5), facecolor='w')\nsns.barplot(x=data_vw['year'], y=data_vw['price'])",
"_____no_output_____"
],
[
"sns.barplot(x=data_vw['transmission'], y=data_vw['price'])",
"_____no_output_____"
],
[
"# Getting the relation b/w milleage and price\nplt.figure(figsize=(10, 6))\nsns.scatterplot(x=data_vw['mileage'], y=data_vw['price'], hue=data_vw['year'])",
"_____no_output_____"
],
[
"plt.figure(figsize=(5,5))\nsns.scatterplot(x=data_vw['mileage'], y=data_vw['price'], hue=data_vw['transmission'])",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\nsns.pairplot(data_vw)",
"_____no_output_____"
]
],
[
[
"## Step 4: Feature Engineering",
"_____no_output_____"
]
],
[
[
"data_vw.head()",
"_____no_output_____"
]
],
[
[
"Dropping the year column, but instead will create data on how old the car is",
"_____no_output_____"
]
],
[
[
"data_vw['age_of_car'] = 2020 - data_vw['year']",
"_____no_output_____"
],
[
"data_vw.drop(['year'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"# Look at the frequency of the ages\nsns.countplot(y=data_vw['age_of_car'])",
"_____no_output_____"
],
[
"# OHE the categorical variables\ndata_vw_extended = pd.get_dummies(data_vw)",
"_____no_output_____"
],
[
"data_vw_extended.shape",
"_____no_output_____"
],
[
"sc = StandardScaler()",
"_____no_output_____"
],
[
"data_vw_extended = pd.DataFrame(sc.fit_transform(data_vw_extended), columns=data_vw_extended.columns)",
"_____no_output_____"
],
[
"data_vw_extended.head()",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(data_vw_extended.drop(['price'], axis=1), data_vw_extended[['price']])",
"_____no_output_____"
],
[
"X_train.shape, X_test.shape, y_train.shape, y_test.shape",
"_____no_output_____"
]
],
[
[
"## Step 5: Feature Selection",
"_____no_output_____"
]
],
[
[
"# Select the k best features\nno_of_features = []\nr_2_train = []\nr_2_test = []",
"_____no_output_____"
],
[
"for k in range(3, 40, 2):\n selector = SelectKBest(f_regression, k=k)\n X_train_selector = selector.fit_transform(X_train, y_train)\n X_test_selector = selector.transform(X_test)\n \n lin_reg = LinearRegression()\n lin_reg.fit(X_train_selector, y_train)\n \n no_of_features.append(k)\n r_2_train.append(lin_reg.score(X_train_selector, y_train))\n r_2_test.append(lin_reg.score(X_test_selector, y_test))",
"_____no_output_____"
],
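[
"# Added sketch: pick the k that maximizes test-set R^2 from the sweep above\n# (np.argmax over r_2_test is an illustrative choice, not part of the original).\nbest_k = no_of_features[np.argmax(r_2_test)]\nprint('best k by test R^2:', best_k)",
"_____no_output_____"
],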
[
"sns.lineplot(x=no_of_features, y=r_2_train)\nsns.lineplot(x=no_of_features, y=r_2_test)",
"_____no_output_____"
]
],
[
[
"k=23 is providing us the best optimal result. Hence training the model on 23",
"_____no_output_____"
]
],
[
[
"selector = SelectKBest(f_regression, k=23)\nX_train_selector = selector.fit_transform(X_train, y_train)\nX_test_selector = selector.transform(X_test)",
"_____no_output_____"
],
[
"column_name = data_vw_extended.drop(['price'], axis=1).columns\ncolumn_name[selector.get_support()]",
"_____no_output_____"
]
],
[
[
"## Step 6: Model",
"_____no_output_____"
]
],
[
[
"def regressor_builder(model):\n regressor = model\n regressor.fit(X_train_selector, y_train)\n score = regressor.score(X_test_selector, y_test)\n return regressor, score",
"_____no_output_____"
],
[
"list_models = [LinearRegression(), Lasso(), Ridge(), SVR(), RandomForestRegressor(), MLPRegressor()]",
"_____no_output_____"
],
[
"model_performance = pd.DataFrame(columns=['Features', 'Model', 'Performance'])",
"_____no_output_____"
],
[
"for model in list_models:\n regressor, score = regressor_builder(model)\n model_performance = model_performance.append({\"Feature\": \"Linear\", \"Model\": regressor, \"Performance\": score}, ignore_index=True)",
"_____no_output_____"
],
[
"model_performance",
"_____no_output_____"
]
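,
[
"# Added sketch: extract the best model by test R^2 from the results frame built above.\nbest = model_performance.sort_values('Performance', ascending=False).iloc[0]\nprint('best model:', best['Model'])\nprint('test R^2:', best['Performance'])",
"_____no_output_____"
]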
],
[
[
"Randomforest provides the best r^2",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0d3c3b83dd863cebe23ef961d979f485f07e8af | 40,464 | ipynb | Jupyter Notebook | notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb | mikegre-google/vertex-ai-samples | 33a8c6ca0e73feb7845ff47af4ca70c2f4b0431e | [
"Apache-2.0"
] | 1 | 2022-03-24T13:56:15.000Z | 2022-03-24T13:56:15.000Z | notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb | mikegre-google/vertex-ai-samples | 33a8c6ca0e73feb7845ff47af4ca70c2f4b0431e | [
"Apache-2.0"
] | null | null | null | notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb | mikegre-google/vertex-ai-samples | 33a8c6ca0e73feb7845ff47af4ca70c2f4b0431e | [
"Apache-2.0"
] | null | null | null | 36.953425 | 350 | 0.527975 | [
[
[
"# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Scikit-Learn\n\n<table align=\"left\">\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb\">\n Open in Google Cloud Notebooks\n </a>\n </td>\n</table>\n<br/><br/><br/>",
"_____no_output_____"
],
[
"## Overview\n\n\nThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Scikit-Learn.",
"_____no_output_____"
],
[
"### Dataset\n\nThe dataset used for this tutorial is the [News Aggregation](https://archive.ics.uci.edu/ml/datasets/News+Aggregator) from [ICS Machine Learning Datasets](https://archive.ics.uci.edu/ml/datasets.php). The trained model predicts the news category of the news article.",
"_____no_output_____"
],
[
"### Objective\n\nIn this tutorial, you learn how to use `Vertex AI Training` for training a Scikit-Learn custom model.\n\nThis tutorial uses the following Google Cloud ML services:\n\n- `Vertex AI Training`\n- `Vertex AI Model` resource\n\nThe steps performed include:\n\n- Training using a Python package.\n- Report accuracy when hyperparameter tuning.\n- Save the model artifacts to Cloud Storage using GCSFuse.\n- Create a `Vertex AI Model` resource.",
"_____no_output_____"
],
[
"## Installations\n\nInstall *one time* the packages for executing the MLOps notebooks.",
"_____no_output_____"
]
],
[
[
"ONCE_ONLY = False\nif ONCE_ONLY:\n ! pip3 install -U tensorflow==2.5 $USER_FLAG\n ! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG\n ! pip3 install -U tensorflow-transform==1.2 $USER_FLAG\n ! pip3 install -U tensorflow-io==0.18 $USER_FLAG\n ! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG\n ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG\n ! pip3 install --upgrade google-cloud-bigquery $USER_FLAG\n ! pip3 install --upgrade google-cloud-logging $USER_FLAG\n ! pip3 install --upgrade apache-beam[gcp] $USER_FLAG\n ! pip3 install --upgrade pyarrow $USER_FLAG\n ! pip3 install --upgrade cloudml-hypertune $USER_FLAG\n ! pip3 install --upgrade kfp $USER_FLAG\n ! pip3 install --upgrade torchvision $USER_FLAG\n ! pip3 install --upgrade rpy2 $USER_FLAG",
"_____no_output_____"
]
],
[
[
"### Restart the kernel\n\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.",
"_____no_output_____"
]
],
[
[
"import os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"#### Set your project ID\n\n**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.",
"_____no_output_____"
]
],
[
[
"PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)",
"_____no_output_____"
],
[
"! gcloud config set project $PROJECT_ID",
"_____no_output_____"
]
],
[
[
"#### Region\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\n- Americas: `us-central1`\n- Europe: `europe-west4`\n- Asia Pacific: `asia-east1`\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\n\nLearn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).",
"_____no_output_____"
]
],
[
[
"REGION = \"us-central1\" # @param {type: \"string\"}",
"_____no_output_____"
]
],
[
[
"#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"_____no_output_____"
]
],
[
[
"### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\n\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.",
"_____no_output_____"
]
],
[
[
"BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"_____no_output_____"
]
],
[
[
"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil mb -l $REGION $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"_____no_output_____"
]
],
[
[
"! gsutil ls -al $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"### Set up variables\n\nNext, set up some variables used throughout the tutorial.\n### Import libraries and define constants",
"_____no_output_____"
]
],
[
[
"import google.cloud.aiplatform as aip",
"_____no_output_____"
]
],
[
[
"### Initialize Vertex AI SDK for Python\n\nInitialize the Vertex AI SDK for Python for your project and corresponding bucket.",
"_____no_output_____"
]
],
[
[
"aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)",
"_____no_output_____"
]
],
[
[
"#### Set hardware accelerators\n\nYou can set hardware accelerators for training and prediction.\n\nSet the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:\n\n (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)\n\n\nOtherwise specify `(None, None)` to use a container image to run on a CPU.\n\nLearn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).\n\n*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3. This is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_TRAIN_GPU\"):\n TRAIN_GPU, TRAIN_NGPU = (\n aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_TRAIN_GPU\")),\n )\nelse:\n TRAIN_GPU, TRAIN_NGPU = (None, None)\n\nif os.getenv(\"IS_TESTING_DEPLOY_GPU\"):\n DEPLOY_GPU, DEPLOY_NGPU = (\n aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_DEPLOY_GPU\")),\n )\nelse:\n DEPLOY_GPU, DEPLOY_NGPU = (None, None)",
"_____no_output_____"
]
],
[
[
"#### Set pre-built containers\n\nSet the pre-built Docker container image for training and prediction.\n\n\nFor the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).\n\n\nFor the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).",
"_____no_output_____"
]
],
[
[
"TRAIN_VERSION = \"scikit-learn-cpu.0-23\"\nDEPLOY_VERSION = \"sklearn-cpu.0-23\"\n\nTRAIN_IMAGE = \"{}-docker.pkg.dev/vertex-ai/training/{}:latest\".format(\n REGION.split(\"-\")[0], TRAIN_VERSION\n)\nDEPLOY_IMAGE = \"{}-docker.pkg.dev/vertex-ai/prediction/{}:latest\".format(\n REGION.split(\"-\")[0], DEPLOY_VERSION\n)",
"_____no_output_____"
]
],
[
[
"#### Set machine type\n\nNext, set the machine type to use for training.\n\n- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for for training.\n - `machine type`\n - `n1-standard`: 3.75GB of memory per vCPU.\n - `n1-highmem`: 6.5GB of memory per vCPU\n - `n1-highcpu`: 0.9 GB of memory per vCPU\n - `vCPUs`: number of \\[2, 4, 8, 16, 32, 64, 96 \\]\n\n*Note: The following is not supported for training:*\n\n - `standard`: 2 vCPUs\n - `highcpu`: 2, 4 and 8 vCPUs\n\n*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_TRAIN_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_TRAIN_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nTRAIN_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Train machine type\", TRAIN_COMPUTE)",
"_____no_output_____"
]
],
[
[
"## Introduction to Scikit-learn training\n\nOnce you have trained a Scikit-learn model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource. The Scikit-learn package does not have support to save the model to a Cloud Storage location. Instead, you will do the following steps to save to a Cloud Storage location.\n\n1. Save the in-memory model to the local filesystem in pickle format (e.g., model.pkl).\n2. Create a Cloud Storage storage client.\n3. Upload the pickle file as a blob to the specified Cloud Storage location using the Cloud Storage storage client.\n\n*Note*: You can do hyperparameter tuning with a Scikit-learn model.",
"_____no_output_____"
],
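[
"Below is a minimal, standalone sketch of those three steps using the `google.cloud.storage` client directly. The bucket and blob names are placeholders, and `model` is assumed to be an already fitted Scikit-Learn estimator:\n\n    import pickle\n    from google.cloud import storage\n\n    # 1. Save the in-memory model to the local filesystem in pickle format.\n    with open('model.pkl', 'wb') as f:\n        pickle.dump(model, f)\n\n    # 2. Create a Cloud Storage storage client.\n    client = storage.Client()\n\n    # 3. Upload the pickle file as a blob to the specified Cloud Storage location.\n    bucket = client.bucket('your-bucket')  # placeholder bucket name\n    bucket.blob('models/model.pkl').upload_from_filename('model.pkl')",
"_____no_output_____"
],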
[
"### Examine the training package\n\n#### Package layout\n\nBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.\n\n- PKG-INFO\n- README.md\n- setup.cfg\n- setup.py\n- trainer\n - \\_\\_init\\_\\_.py\n - task.py\n\nThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.\n\nThe file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).\n\n#### Package Assembly\n\nIn the following cells, you will assemble the training package.",
"_____no_output_____"
]
],
[
[
"# Make folder for Python training script\n! rm -rf custom\n! mkdir custom\n\n# Add package information\n! touch custom/README.md\n\nsetup_cfg = \"[egg_info]\\n\\ntag_build =\\n\\ntag_date = 0\"\n! echo \"$setup_cfg\" > custom/setup.cfg\n\nsetup_py = \"import setuptools\\n\\nsetuptools.setup(\\n\\n install_requires=[\\n\\n 'wget',\\n\\n 'cloudml-hypertune',\\n\\n ],\\n\\n packages=setuptools.find_packages())\"\n! echo \"$setup_py\" > custom/setup.py\n\npkg_info = \"Metadata-Version: 1.0\\n\\nName: News Aggregation text classification\\n\\nVersion: 0.0.0\\n\\nSummary: Demostration training script\\n\\nHome-page: www.google.com\\n\\nAuthor: Google\\n\\nAuthor-email: [email protected]\\n\\nLicense: Public\\n\\nDescription: Demo\\n\\nPlatform: Vertex\"\n! echo \"$pkg_info\" > custom/PKG-INFO\n\n# Make the training subfolder\n! mkdir custom/trainer\n! touch custom/trainer/__init__.py",
"_____no_output_____"
]
],
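[
[
"# Optional check (added): list the assembled package so you can compare it\n# against the package layout described above.\n! find custom | sort",
"_____no_output_____"
]
],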
[
[
"### Create the task script for the Python training package\n\nNext, you create the `task.py` script for driving the training package. Some noteable steps include:\n\n- Command-line arguments:\n - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable: `AIP_MODEL_DIR`,\n - `dataset_url`: The location of the dataset to download.\n - `alpha`: Hyperparameter\n- Data preprocessing (`get_data()`):\n - Download the dataset and split into training and test.\n- Model architecture (`get_model()`):\n - Builds the corresponding model architecture.\n- Training (`train_model()`):\n - Trains the model\n- Evaluation (`evaluate_model()`):\n - Evaluates the model.\n - If hyperparameter tuning, reports the metric for accuracy.\n- Model artifact saving\n - Saves the model artifacts and evaluation metrics where the Cloud Storage location specified by `model-dir`.\n - *Note*: GCSFuse (`/gcs`) is used to do filesystem operations on Cloud Storage buckets.",
"_____no_output_____"
]
],
[
[
"%%writefile custom/trainer/task.py\nimport argparse\nimport logging\nimport os\nimport pickle\nimport zipfile\nfrom typing import List, Tuple\n\nimport pandas as pd\nimport wget\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nimport hypertune\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model-dir', dest='model_dir',\n default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')\nparser.add_argument(\"--dataset-url\", dest=\"dataset_url\",\n type=str, help=\"Download url for the training data.\")\nparser.add_argument('--alpha', dest='alpha',\n default=1.0, type=float,\n help='Alpha parameters for MultinomialNB')\nargs = parser.parse_args()\n\nlogging.getLogger().setLevel(logging.INFO)\n\ndef get_data(url: str, test_size: float = 0.2) -> Tuple[List, List, List, List]:\n logging.info(\"Downloading training data from: {}\".format(args.dataset_url))\n\n zip_filepath = wget.download(url, out=\".\")\n\n with zipfile.ZipFile(zip_filepath, \"r\") as zf:\n zf.extract(path=\".\", member=\"newsCorpora.csv\")\n\n COLUMN_NAMES = [\"id\", \"title\", \"url\", \"publisher\",\n \"category\", \"story\", \"hostname\", \"timestamp\"]\n\n dataframe = pd.read_csv(\n \"newsCorpora.csv\", delimiter=\"\t\", names=COLUMN_NAMES, index_col=0\n )\n\n train, test = train_test_split(dataframe, test_size=test_size)\n\n x_train, y_train = train[\"title\"].values, train[\"category\"].values\n x_test, y_test = test[\"title\"].values, test[\"category\"].values\n\n return x_train, y_train, x_test, y_test\n\ndef get_model():\n logging.info(\"Build model ...\")\n model = Pipeline([\n (\"vectorizer\", CountVectorizer()),\n (\"tfidf\", TfidfTransformer()),\n (\"naivebayes\", MultinomialNB(alpha=args.alpha)),\n ])\n return model\n\ndef train_model(model: Pipeline, X_train: List, y_train: List, X_test: List, y_test: List\n) -> Pipeline:\n logging.info(\"Training started ...\")\n model.fit(X_train, y_train)\n logging.info(\"Training completed\")\n return model\n\ndef evaluate_model(model: Pipeline, X_train: List, y_train: List, X_test: List, y_test: List\n) -> float:\n score = model.score(X_test, y_test)\n logging.info(f\"Evaluation completed with model score: {score}\")\n\n # report metric for hyperparameter tuning\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='accuracy',\n metric_value=score\n )\n return score\n\n\ndef export_model_to_gcs(fitted_pipeline: Pipeline, gcs_uri: str) -> str:\n \"\"\"Exports trained pipeline to GCS\n Parameters:\n fitted_pipeline (sklearn.pipelines.Pipeline): the Pipeline object\n with data already fitted (trained pipeline object).\n gcs_uri (str): GCS path to store the trained pipeline\n i.e gs://example_bucket/training-job.\n Returns:\n export_path (str): Model GCS location\n \"\"\"\n # Upload model artifact to Cloud Storage\n artifact_filename = 'model.pkl'\n storage_path = os.path.join(gcs_uri, artifact_filename)\n\n # Save model artifact to local filesystem (doesn't persist)\n with open(storage_path, 'wb') as model_file:\n pickle.dump(fitted_pipeline, model_file)\n\n\ndef export_evaluation_report_to_gcs(report: str, gcs_uri: str) -> None:\n \"\"\"\n Exports training job report to GCS\n Parameters:\n report (str): Full report in text to sent to GCS\n gcs_uri (str): GCS path to store the report\n i.e gs://example_bucket/training-job\n \"\"\"\n\n # Upload 
model artifact to Cloud Storage\n artifact_filename = 'report.txt'\n storage_path = os.path.join(gcs_uri, artifact_filename)\n\n # Save model artifact to local filesystem (doesn't persist)\n with open(storage_path, 'w') as report_file:\n report_file.write(report)\n\n\nlogging.info(\"Starting custom training job.\")\n\ndata = get_data(args.dataset_url)\nmodel = get_model()\nmodel = train_model(model, *data)\nscore = evaluate_model(model, *data)\n\n# export model to gcs using GCSFuse\nlogging.info(\"Exporting model artifacts ...\")\ngs_prefix = 'gs://'\ngcsfuse_prefix = '/gcs/'\nif args.model_dir.startswith(gs_prefix):\n args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)\n dirpath = os.path.split(args.model_dir)[0]\n if not os.path.isdir(dirpath):\n os.makedirs(dirpath)\n\nexport_model_to_gcs(model, args.model_dir)\nexport_evaluation_report_to_gcs(str(score), args.model_dir)\nlogging.info(f\"Exported model artifacts to GCS bucket: {args.model_dir}\")",
"_____no_output_____"
]
],
[
[
"#### Store training script on your Cloud Storage bucket\n\nNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! rm -f custom.tar custom.tar.gz\n! tar cvf custom.tar custom\n! gzip custom.tar\n! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_newsaggr.tar.gz",
"_____no_output_____"
]
],
[
[
"### Create and run custom training job\n\n\nTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.\n\n#### Create custom training job\n\nA custom training job is created with the `CustomTrainingJob` class, with the following parameters:\n\n- `display_name`: The human readable name for the custom training job.\n- `container_uri`: The training container image.\n\n- `python_package_gcs_uri`: The location of the Python training package as a tarball.\n- `python_module_name`: The relative path to the training script in the Python package.\n- `model_serving_container_uri`: The container image for deploying the model.\n\n*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.",
"_____no_output_____"
]
],
[
[
"DISPLAY_NAME = \"newsaggr_\" + TIMESTAMP\n\njob = aip.CustomPythonPackageTrainingJob(\n display_name=DISPLAY_NAME,\n python_package_gcs_uri=f\"{BUCKET_NAME}/trainer_newsaggr.tar.gz\",\n python_module_name=\"trainer.task\",\n container_uri=TRAIN_IMAGE,\n model_serving_container_image_uri=DEPLOY_IMAGE,\n project=PROJECT_ID,\n)",
"_____no_output_____"
]
],
[
[
"### Prepare your command-line arguments\n\nNow define the command-line arguments for your custom training container:\n\n- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container.\n - `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts.\n - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or\n - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.\n - `--dataset-url`: The location of the dataset to download.\n - `--alpha`: Tunable hyperparameter",
"_____no_output_____"
]
],
[
[
"MODEL_DIR = \"{}/{}\".format(BUCKET_NAME, TIMESTAMP)\nDATASET_URL = \"https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip\"\n\nDIRECT = False\n\nif DIRECT:\n CMDARGS = [\n \"--alpha=\" + str(0.9),\n \"--dataset-url=\" + DATASET_URL,\n \"--model_dir=\" + MODEL_DIR,\n ]\nelse:\n CMDARGS = [\"--alpha=\" + str(0.9), \"--dataset-url=\" + DATASET_URL]",
"_____no_output_____"
]
],
[
[
"#### Run the custom training job\n\nNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:\n\n- `model_display_name`: The human readable name for the `Model` resource.\n- `args`: The command-line arguments to pass to the training script.\n- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).\n- `machine_type`: The machine type for the compute instances.\n- `accelerator_type`: The hardware accelerator type.\n- `accelerator_count`: The number of accelerators to attach to a worker replica.\n- `base_output_dir`: The Cloud Storage location to write the model artifacts to.\n- `sync`: Whether to block until completion of the job.",
"_____no_output_____"
]
],
[
[
"if TRAIN_GPU:\n model = job.run(\n model_display_name=\"newsaggr_\" + TIMESTAMP,\n args=CMDARGS,\n replica_count=1,\n machine_type=TRAIN_COMPUTE,\n accelerator_type=TRAIN_GPU.name,\n accelerator_count=TRAIN_NGPU,\n base_output_dir=MODEL_DIR,\n sync=False,\n )\nelse:\n model = job.run(\n model_display_name=\"newsaggr_\" + TIMESTAMP,\n args=CMDARGS,\n replica_count=1,\n machine_type=TRAIN_COMPUTE,\n base_output_dir=MODEL_DIR,\n sync=False,\n )\n\nmodel_path_to_deploy = MODEL_DIR",
"_____no_output_____"
]
],
[
[
"### List a custom training job",
"_____no_output_____"
]
],
[
[
"_job = job.list(filter=f\"display_name={DISPLAY_NAME}\")\nprint(_job)",
"_____no_output_____"
]
],
[
[
"### Wait for completion of custom training job\n\nNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.",
"_____no_output_____"
]
],
[
[
"model.wait()",
"_____no_output_____"
]
],
[
[
"### Delete a custom training job\n\nAfter a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be canceled with the method `cancel()`.",
"_____no_output_____"
]
],
[
[
"job.delete()",
"_____no_output_____"
]
],
[
[
"# Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n\n- Dataset\n- Pipeline\n- Model\n- Endpoint\n- AutoML Training Job\n- Batch Job\n- Custom Job\n- Hyperparameter Tuning Job\n- Cloud Storage Bucket",
"_____no_output_____"
]
],
[
[
"delete_all = True\n\nif delete_all:\n # Delete the dataset using the Vertex dataset object\n try:\n if \"dataset\" in globals():\n dataset.delete()\n except Exception as e:\n print(e)\n\n # Delete the model using the Vertex model object\n try:\n if \"model\" in globals():\n model.delete()\n except Exception as e:\n print(e)\n\n # Delete the endpoint using the Vertex endpoint object\n try:\n if \"endpoint\" in globals():\n endpoint.undeploy_all()\n endpoint.delete()\n except Exception as e:\n print(e)\n\n # Delete the AutoML or Pipeline training job\n try:\n if \"dag\" in globals():\n dag.delete()\n except Exception as e:\n print(e)\n\n # Delete the custom training job\n try:\n if \"job\" in globals():\n job.delete()\n except Exception as e:\n print(e)\n\n # Delete the batch prediction job using the Vertex batch prediction object\n try:\n if \"batch_predict_job\" in globals():\n batch_predict_job.delete()\n except Exception as e:\n print(e)\n\n # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object\n try:\n if \"hpt_job\" in globals():\n hpt_job.delete()\n except Exception as e:\n print(e)\n\n if \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
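[
"code"
],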
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3c61eabb94c477152c9eb6d996d8ff04201f6 | 82,131 | ipynb | Jupyter Notebook | nbs/05_data.transforms.ipynb | hanshin-back/fastai | eb98c4a490c319f8136be92cfc1628b5de3f33e2 | [
"Apache-2.0"
] | 1 | 2020-11-28T20:01:39.000Z | 2020-11-28T20:01:39.000Z | nbs/05_data.transforms.ipynb | hanshin-back/fastai | eb98c4a490c319f8136be92cfc1628b5de3f33e2 | [
"Apache-2.0"
] | 1 | 2021-02-23T22:57:30.000Z | 2021-02-23T22:57:30.000Z | nbs/05_data.transforms.ipynb | hanshin-back/fastai | eb98c4a490c319f8136be92cfc1628b5de3f33e2 | [
"Apache-2.0"
] | null | null | null | 45.908888 | 3,716 | 0.690738 | [
[
[
"#hide\n#skip\n! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab",
"_____no_output_____"
],
[
"#default_exp data.transforms",
"_____no_output_____"
],
[
"#export\nfrom fastai.torch_basics import *\nfrom fastai.data.core import *\nfrom fastai.data.load import *\nfrom fastai.data.external import *\n\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
]
],
[
[
"# Helper functions for processing data and basic transforms\n\n> Functions for getting, splitting, and labeling data, as well as generic transforms",
"_____no_output_____"
],
[
"## Get, split, and label",
"_____no_output_____"
],
[
"For most data source creation we need functions to get a list of items, split them in to train/valid sets, and label them. fastai provides functions to make each of these steps easy (especially when combined with `fastai.data.blocks`).",
"_____no_output_____"
],
[
"### Get",
"_____no_output_____"
],
[
"First we'll look at functions that *get* a list of items (generally file names).\n\nWe'll use *tiny MNIST* (a subset of MNIST with just two classes, `7`s and `3`s) for our examples/tests throughout this page.",
"_____no_output_____"
]
],
[
[
"path = untar_data(URLs.MNIST_TINY)\n(path/'train').ls()",
"_____no_output_____"
],
[
"# export\ndef _get_files(p, fs, extensions=None):\n p = Path(p)\n res = [p/f for f in fs if not f.startswith('.')\n and ((not extensions) or f'.{f.split(\".\")[-1].lower()}' in extensions)]\n return res",
"_____no_output_____"
],
[
"# export\ndef get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):\n \"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified.\"\n path = Path(path)\n folders=L(folders)\n extensions = setify(extensions)\n extensions = {e.lower() for e in extensions}\n if recurse:\n res = []\n for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)\n if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]\n else: d[:] = [o for o in d if not o.startswith('.')]\n if len(folders) !=0 and i==0 and '.' not in folders: continue\n res += _get_files(p, f, extensions)\n else:\n f = [o.name for o in os.scandir(path) if o.is_file()]\n res = _get_files(path, f, extensions)\n return L(res)",
"_____no_output_____"
]
],
[
[
"This is the most general way to grab a bunch of file names from disk. If you pass `extensions` (including the `.`) then returned file names are filtered by that list. Only those files directly in `path` are included, unless you pass `recurse`, in which case all child folders are also searched recursively. `folders` is an optional list of directories to limit the search to.",
"_____no_output_____"
]
],
[
[
"t3 = get_files(path/'train'/'3', extensions='.png', recurse=False)\nt7 = get_files(path/'train'/'7', extensions='.png', recurse=False)\nt = get_files(path/'train', extensions='.png', recurse=True)\ntest_eq(len(t), len(t3)+len(t7))\ntest_eq(len(get_files(path/'train'/'3', extensions='.jpg', recurse=False)),0)\ntest_eq(len(t), len(get_files(path, extensions='.png', recurse=True, folders='train')))\nt",
"_____no_output_____"
],
[
"#hide\ntest_eq(len(get_files(path/'train'/'3', recurse=False)),346)\ntest_eq(len(get_files(path, extensions='.png', recurse=True, folders=['train', 'test'])),729)\ntest_eq(len(get_files(path, extensions='.png', recurse=True, folders='train')),709)\ntest_eq(len(get_files(path, extensions='.png', recurse=True, folders='training')),0)",
"_____no_output_____"
]
],
[
[
"It's often useful to be able to create functions with customized behavior. `fastai.data` generally uses functions named as CamelCase verbs ending in `er` to create these functions. `FileGetter` is a simple example of such a function creator.",
"_____no_output_____"
]
],
[
[
"#export\ndef FileGetter(suf='', extensions=None, recurse=True, folders=None):\n \"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args\"\n def _inner(o, extensions=extensions, recurse=recurse, folders=folders):\n return get_files(o/suf, extensions, recurse, folders)\n return _inner",
"_____no_output_____"
],
[
"fpng = FileGetter(extensions='.png', recurse=False)\ntest_eq(len(t7), len(fpng(path/'train'/'7')))\ntest_eq(len(t), len(fpng(path/'train', recurse=True)))\nfpng_r = FileGetter(extensions='.png', recurse=True)\ntest_eq(len(t), len(fpng_r(path/'train')))",
"_____no_output_____"
],
[
"#export\nimage_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))",
"_____no_output_____"
],
[
"#export\ndef get_image_files(path, recurse=True, folders=None):\n \"Get image files in `path` recursively, only in `folders`, if specified.\"\n return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)",
"_____no_output_____"
]
],
[
[
"This is simply `get_files` called with a list of standard image extensions.",
"_____no_output_____"
]
],
[
[
"test_eq(len(t), len(get_image_files(path, recurse=True, folders='train')))",
"_____no_output_____"
],
[
"#export\ndef ImageGetter(suf='', recurse=True, folders=None):\n \"Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified\"\n def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)\n return _inner",
"_____no_output_____"
]
],
[
[
"Same as `FileGetter`, but for image extensions.",
"_____no_output_____"
]
],
[
[
"test_eq(len(get_files(path/'train', extensions='.png', recurse=True, folders='3')),\n len(ImageGetter( 'train', recurse=True, folders='3')(path)))",
"_____no_output_____"
],
[
"#export\ndef get_text_files(path, recurse=True, folders=None):\n \"Get text files in `path` recursively, only in `folders`, if specified.\"\n return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)",
"_____no_output_____"
],
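[
"# A small usage sketch (not part of the original tests): `get_text_files` filters on the\n# `.txt` extension, shown here on a temporary directory created just for the example.\nwith tempfile.TemporaryDirectory() as d:\n    (Path(d)/'a.txt').write_text('hello')\n    (Path(d)/'b.png').write_text('not text')\n    test_eq(len(get_text_files(d)), 1)",
"_____no_output_____"
],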
[
"#export\nclass ItemGetter(ItemTransform):\n \"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)\"\n _retain = False\n def __init__(self, i): self.i = i\n def encodes(self, x): return x[self.i]",
"_____no_output_____"
],
[
"test_eq(ItemGetter(1)((1,2,3)), 2)\ntest_eq(ItemGetter(1)(L(1,2,3)), 2)\ntest_eq(ItemGetter(1)([1,2,3]), 2)\ntest_eq(ItemGetter(1)(np.array([1,2,3])), 2)",
"_____no_output_____"
],
[
"#export\nclass AttrGetter(ItemTransform):\n \"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)\"\n _retain = False\n def __init__(self, nm, default=None): store_attr()\n def encodes(self, x): return getattr(x, self.nm, self.default)",
"_____no_output_____"
],
[
"test_eq(AttrGetter('shape')(torch.randn([4,5])), [4,5])\ntest_eq(AttrGetter('shape', [0])([4,5]), [0])",
"_____no_output_____"
]
],
[
[
"### Split",
"_____no_output_____"
],
[
"The next set of functions are used to *split* data into training and validation sets. The functions return two lists - a list of indices or masks for each of training and validation sets.",
"_____no_output_____"
]
],
[
[
"# export\ndef RandomSplitter(valid_pct=0.2, seed=None):\n \"Create function that splits `items` between train/val with `valid_pct` randomly.\"\n def _inner(o):\n if seed is not None: torch.manual_seed(seed)\n rand_idx = L(list(torch.randperm(len(o)).numpy()))\n cut = int(valid_pct * len(o))\n return rand_idx[cut:],rand_idx[:cut]\n return _inner",
"_____no_output_____"
],
[
"src = list(range(30))\nf = RandomSplitter(seed=42)\ntrn,val = f(src)\nassert 0<len(trn)<len(src)\nassert all(o not in val for o in trn)\ntest_eq(len(trn), len(src)-len(val))\n# test random seed consistency\ntest_eq(f(src)[0], trn)",
"_____no_output_____"
]
],
[
[
"Use scikit-learn train_test_split. This allow to *split* items in a stratified fashion (uniformely according to the ‘labels‘ distribution)",
"_____no_output_____"
]
],
[
[
"# export\ndef TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):\n \"Split `items` into random train and test subsets using sklearn train_test_split utility.\"\n def _inner(o, **kwargs):\n train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,\n stratify=stratify, train_size=train_size, shuffle=shuffle)\n return L(train), L(valid)\n return _inner",
"_____no_output_____"
],
[
"src = list(range(30))\nlabels = [0] * 20 + [1] * 10\ntest_size = 0.2\n\nf = TrainTestSplitter(test_size=test_size, random_state=42, stratify=labels)\ntrn,val = f(src)\nassert 0<len(trn)<len(src)\nassert all(o not in val for o in trn)\ntest_eq(len(trn), len(src)-len(val))\n\n# test random seed consistency\ntest_eq(f(src)[0], trn)\n\n# test labels distribution consistency\n# there should be test_size % of zeroes and ones respectively in the validation set\ntest_eq(len([t for t in val if t < 20]) / 20, test_size)\ntest_eq(len([t for t in val if t > 20]) / 10, test_size)",
"_____no_output_____"
],
[
"#export\ndef IndexSplitter(valid_idx):\n \"Split `items` so that `val_idx` are in the validation set and the others in the training set\"\n def _inner(o):\n train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))\n return L(train_idx, use_list=True), L(valid_idx, use_list=True)\n return _inner",
"_____no_output_____"
],
[
"items = list(range(10))\nsplitter = IndexSplitter([3,7,9])\ntest_eq(splitter(items),[[0,1,2,4,5,6,8],[3,7,9]])",
"_____no_output_____"
],
[
"# export\ndef _grandparent_idxs(items, name):\n def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)\n return [i for n in L(name) for i in _inner(items,n)]",
"_____no_output_____"
],
[
"# export\ndef GrandparentSplitter(train_name='train', valid_name='valid'):\n \"Split `items` from the grand parent folder names (`train_name` and `valid_name`).\"\n def _inner(o):\n return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)\n return _inner",
"_____no_output_____"
],
[
"fnames = [path/'train/3/9932.png', path/'valid/7/7189.png', \n path/'valid/7/7320.png', path/'train/7/9833.png', \n path/'train/3/7666.png', path/'valid/3/925.png',\n path/'train/7/724.png', path/'valid/3/93055.png']\nsplitter = GrandparentSplitter()\ntest_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])",
"_____no_output_____"
],
[
"fnames2 = fnames + [path/'test/3/4256.png', path/'test/7/2345.png', path/'valid/7/6467.png']\nsplitter = GrandparentSplitter(train_name=('train', 'valid'), valid_name='test')\ntest_eq(splitter(fnames2),[[0,3,4,6,1,2,5,7,10],[8,9]])",
"_____no_output_____"
],
[
"# export\ndef FuncSplitter(func):\n \"Split `items` by result of `func` (`True` for validation, `False` for training set).\"\n def _inner(o):\n val_idx = mask2idxs(func(o_) for o_ in o)\n return IndexSplitter(val_idx)(o)\n return _inner",
"_____no_output_____"
],
[
"splitter = FuncSplitter(lambda o: Path(o).parent.parent.name == 'valid')\ntest_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])",
"_____no_output_____"
],
[
"# export\ndef MaskSplitter(mask):\n \"Split `items` depending on the value of `mask`.\"\n def _inner(o): return IndexSplitter(mask2idxs(mask))(o)\n return _inner",
"_____no_output_____"
],
[
"items = list(range(6))\nsplitter = MaskSplitter([True,False,False,True,False,True])\ntest_eq(splitter(items),[[1,2,4],[0,3,5]])",
"_____no_output_____"
],
[
"# export\ndef FileSplitter(fname):\n \"Split `items` by providing file `fname` (contains names of valid items separated by newline).\"\n valid = Path(fname).read_text().split('\\n')\n def _func(x): return x.name in valid\n def _inner(o): return FuncSplitter(_func)(o)\n return _inner",
"_____no_output_____"
],
[
"with tempfile.TemporaryDirectory() as d:\n fname = Path(d)/'valid.txt'\n fname.write_text('\\n'.join([Path(fnames[i]).name for i in [1,3,4]]))\n splitter = FileSplitter(fname)\n test_eq(splitter(fnames),[[0,2,5,6,7],[1,3,4]])",
"_____no_output_____"
],
[
"# export\ndef ColSplitter(col='is_valid'):\n \"Split `items` (supposed to be a dataframe) by value in `col`\"\n def _inner(o):\n assert isinstance(o, pd.DataFrame), \"ColSplitter only works when your items are a pandas DataFrame\"\n valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')\n return IndexSplitter(mask2idxs(valid_idx))(o)\n return _inner",
"_____no_output_____"
],
[
"df = pd.DataFrame({'a': [0,1,2,3,4], 'b': [True,False,True,True,False]})\nsplits = ColSplitter('b')(df)\ntest_eq(splits, [[1,4], [0,2,3]])\n#Works with strings or index\nsplits = ColSplitter(1)(df)\ntest_eq(splits, [[1,4], [0,2,3]])\n# does not get confused if the type of 'is_valid' is integer, but it meant to be a yes/no\ndf = pd.DataFrame({'a': [0,1,2,3,4], 'is_valid': [1,0,1,1,0]})\nsplits_by_int = ColSplitter('is_valid')(df)\ntest_eq(splits_by_int, [[1,4], [0,2,3]])",
"_____no_output_____"
],
[
"# export\ndef RandomSubsetSplitter(train_sz, valid_sz, seed=None):\n \"Take randoms subsets of `splits` with `train_sz` and `valid_sz`\"\n assert 0 < train_sz < 1\n assert 0 < valid_sz < 1\n assert train_sz + valid_sz <= 1.\n\n def _inner(o):\n if seed is not None: torch.manual_seed(seed)\n train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)\n idxs = L(list(torch.randperm(len(o)).numpy()))\n return idxs[:train_len],idxs[train_len:train_len+valid_len]\n return _inner",
"_____no_output_____"
],
[
"items = list(range(100))\nvalid_idx = list(np.arange(70,100))\nsplits = RandomSubsetSplitter(0.3, 0.1)(items)\ntest_eq(len(splits[0]), 30)\ntest_eq(len(splits[1]), 10)",
"_____no_output_____"
]
],
[
[
"### Label",
"_____no_output_____"
],
[
"The final set of functions is used to *label* a single item of data.",
"_____no_output_____"
]
],
[
[
"# export\ndef parent_label(o):\n \"Label `item` with the parent folder name.\"\n return Path(o).parent.name",
"_____no_output_____"
]
],
[
[
"Note that `parent_label` doesn't have anything customize, so it doesn't return a function - you can just use it directly.",
"_____no_output_____"
]
],
[
[
"test_eq(parent_label(fnames[0]), '3')\ntest_eq(parent_label(\"fastai_dev/dev/data/mnist_tiny/train/3/9932.png\"), '3')\n[parent_label(o) for o in fnames]",
"_____no_output_____"
],
[
"#hide\n#test for MS Windows when os.path.sep is '\\\\' instead of '/'\ntest_eq(parent_label(os.path.join(\"fastai_dev\",\"dev\",\"data\",\"mnist_tiny\",\"train\", \"3\", \"9932.png\") ), '3')",
"_____no_output_____"
],
[
"# export\nclass RegexLabeller():\n \"Label `item` with regex `pat`.\"\n def __init__(self, pat, match=False):\n self.pat = re.compile(pat)\n self.matcher = self.pat.match if match else self.pat.search\n\n def __call__(self, o):\n res = self.matcher(str(o))\n assert res,f'Failed to find \"{self.pat}\" in \"{o}\"'\n return res.group(1)",
"_____no_output_____"
]
],
[
[
"`RegexLabeller` is a very flexible function since it handles any regex search of the stringified item. Pass `match=True` to use `re.match` (i.e. check only start of string), or `re.search` otherwise (default).\n\nFor instance, here's an example the replicates the previous `parent_label` results.",
"_____no_output_____"
]
],
[
[
"f = RegexLabeller(fr'{os.path.sep}(\\d){os.path.sep}')\ntest_eq(f(fnames[0]), '3')\n[f(o) for o in fnames]",
"_____no_output_____"
],
[
"f = RegexLabeller(r'(\\d*)', match=True)\ntest_eq(f(fnames[0].name), '9932')",
"_____no_output_____"
],
[
"#export\nclass ColReader(DisplayedTransform):\n \"Read `cols` in `row` with potential `pref` and `suff`\"\n def __init__(self, cols, pref='', suff='', label_delim=None):\n store_attr()\n self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref\n self.cols = L(cols)\n\n def _do_one(self, r, c):\n o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)\n if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o\n if self.label_delim is None: return f'{self.pref}{o}{self.suff}'\n else: return o.split(self.label_delim) if len(o)>0 else []\n\n def __call__(self, o, **kwargs):\n if len(self.cols) == 1: return self._do_one(o, self.cols[0])\n return L(self._do_one(o, c) for c in self.cols)",
"_____no_output_____"
]
],
[
[
"`cols` can be a list of column names or a list of indices (or a mix of both). If `label_delim` is passed, the result is split using it.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({'a': 'a b c d'.split(), 'b': ['1 2', '0', '', '1 2 3']})\nf = ColReader('a', pref='0', suff='1')\ntest_eq([f(o) for o in df.itertuples()], '0a1 0b1 0c1 0d1'.split())\n\nf = ColReader('b', label_delim=' ')\ntest_eq([f(o) for o in df.itertuples()], [['1', '2'], ['0'], [], ['1', '2', '3']])\n\ndf['a1'] = df['a']\nf = ColReader(['a', 'a1'], pref='0', suff='1')\ntest_eq([f(o) for o in df.itertuples()], [L('0a1', '0a1'), L('0b1', '0b1'), L('0c1', '0c1'), L('0d1', '0d1')])\n\ndf = pd.DataFrame({'a': [L(0,1), L(2,3,4), L(5,6,7)]})\nf = ColReader('a')\ntest_eq([f(o) for o in df.itertuples()], [L(0,1), L(2,3,4), L(5,6,7)])\n\ndf['name'] = df['a']\nf = ColReader('name')\ntest_eq([f(df.iloc[0,:])], [L(0,1)])",
"_____no_output_____"
]
],
[
[
"## Categorize -",
"_____no_output_____"
]
],
[
[
"#export\nclass CategoryMap(CollBase):\n \"Collection of categories with the reverse mapping in `o2i`\"\n def __init__(self, col, sort=True, add_na=False, strict=False):\n if is_categorical_dtype(col):\n items = L(col.cat.categories, use_list=True)\n #Remove non-used categories while keeping order\n if strict: items = L(o for o in items if o in col.unique())\n else:\n if not hasattr(col,'unique'): col = L(col, use_list=True)\n # `o==o` is the generalized definition of non-NaN used by Pandas\n items = L(o for o in col.unique() if o==o)\n if sort: items = items.sorted()\n self.items = '#na#' + items if add_na else items\n self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())\n\n def map_objs(self,objs):\n \"Map `objs` to IDs\"\n return L(self.o2i[o] for o in objs)\n\n def map_ids(self,ids):\n \"Map `ids` to objects in vocab\"\n return L(self.items[o] for o in ids)\n\n def __eq__(self,b): return all_equal(b,self)",
"_____no_output_____"
],
[
"t = CategoryMap([4,2,3,4])\ntest_eq(t, [2,3,4])\ntest_eq(t.o2i, {2:0,3:1,4:2})\ntest_eq(t.map_objs([2,3]), [0,1])\ntest_eq(t.map_ids([0,1]), [2,3])\ntest_fail(lambda: t.o2i['unseen label'])",
"_____no_output_____"
],
[
"t = CategoryMap([4,2,3,4], add_na=True)\ntest_eq(t, ['#na#',2,3,4])\ntest_eq(t.o2i, {'#na#':0,2:1,3:2,4:3})",
"_____no_output_____"
],
[
"t = CategoryMap(pd.Series([4,2,3,4]), sort=False)\ntest_eq(t, [4,2,3])\ntest_eq(t.o2i, {4:0,2:1,3:2})",
"_____no_output_____"
],
[
"col = pd.Series(pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True))\nt = CategoryMap(col)\ntest_eq(t, ['H','M','L'])\ntest_eq(t.o2i, {'H':0,'M':1,'L':2})",
"_____no_output_____"
],
[
"col = pd.Series(pd.Categorical(['M','H','M'], categories=['H','M','L'], ordered=True))\nt = CategoryMap(col, strict=True)\ntest_eq(t, ['H','M'])\ntest_eq(t.o2i, {'H':0,'M':1})",
"_____no_output_____"
],
[
"# export\nclass Categorize(DisplayedTransform):\n \"Reversible transform of category string to `vocab` id\"\n loss_func,order=CrossEntropyLossFlat(),1\n def __init__(self, vocab=None, sort=True, add_na=False):\n if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)\n store_attr()\n\n def setups(self, dsets):\n if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)\n self.c = len(self.vocab)\n\n def encodes(self, o): \n try:\n return TensorCategory(self.vocab.o2i[o])\n except KeyError as e:\n raise KeyError(f\"Label '{o}' was not included in the training dataset\") from e\n def decodes(self, o): return Category (self.vocab [o])",
"_____no_output_____"
],
[
"#export\nclass Category(str, ShowTitle): _show_args = {'label': 'category'}",
"_____no_output_____"
],
[
"cat = Categorize()\ntds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])\ntest_eq(cat.vocab, ['cat', 'dog'])\ntest_eq(cat('cat'), 0)\ntest_eq(cat.decode(1), 'dog')\ntest_stdout(lambda: show_at(tds,2), 'cat')\ntest_fail(lambda: cat('bird'))",
"_____no_output_____"
],
[
"cat = Categorize(add_na=True)\ntds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])\ntest_eq(cat.vocab, ['#na#', 'cat', 'dog'])\ntest_eq(cat('cat'), 1)\ntest_eq(cat.decode(2), 'dog')\ntest_stdout(lambda: show_at(tds,2), 'cat')",
"_____no_output_____"
],
[
"cat = Categorize(vocab=['dog', 'cat'], sort=False, add_na=True)\ntds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])\ntest_eq(cat.vocab, ['#na#', 'dog', 'cat'])\ntest_eq(cat('dog'), 1)\ntest_eq(cat.decode(2), 'cat')\ntest_stdout(lambda: show_at(tds,2), 'cat')",
"_____no_output_____"
]
],
[
[
"## Multicategorize -",
"_____no_output_____"
]
],
[
[
"# export\nclass MultiCategorize(Categorize):\n \"Reversible transform of multi-category strings to `vocab` id\"\n loss_func,order=BCEWithLogitsLossFlat(),1\n def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)\n\n def setups(self, dsets):\n if not dsets: return\n if self.vocab is None:\n vals = set()\n for b in dsets: vals = vals.union(set(b))\n self.vocab = CategoryMap(list(vals), add_na=self.add_na)\n\n def encodes(self, o): \n if not all(elem in self.vocab.o2i.keys() for elem in o):\n diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]\n diff_str = \"', '\".join(diff)\n raise KeyError(f\"Labels '{diff_str}' were not included in the training dataset\")\n return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])\n def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])",
"_____no_output_____"
],
[
"#export\nclass MultiCategory(L):\n def show(self, ctx=None, sep=';', color='black', **kwargs):\n return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)",
"_____no_output_____"
],
[
"cat = MultiCategorize()\ntds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], tfms=[cat])\ntest_eq(tds[3][0], TensorMultiCategory([]))\ntest_eq(cat.vocab, ['a', 'b', 'c'])\ntest_eq(cat(['a', 'c']), tensor([0,2]))\ntest_eq(cat([]), tensor([]))\ntest_eq(cat.decode([1]), ['b'])\ntest_eq(cat.decode([0,2]), ['a', 'c'])\ntest_stdout(lambda: show_at(tds,2), 'a;c')\n\n# if vocab supplied, ensure it maintains its order (i.e., it doesn't sort)\ncat = MultiCategorize(vocab=['z', 'y', 'x'])\ntest_eq(cat.vocab, ['z','y','x'])\n\ntest_fail(lambda: cat('bird'))",
"_____no_output_____"
],
[
"# export\nclass OneHotEncode(DisplayedTransform):\n \"One-hot encodes targets\"\n order=2\n def __init__(self, c=None): store_attr()\n\n def setups(self, dsets):\n if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))\n if not self.c: warn(\"Couldn't infer the number of classes, please pass a value for `c` at init\")\n\n def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())\n def decodes(self, o): return one_hot_decode(o, None)",
"_____no_output_____"
]
],
[
[
"Works in conjunction with ` MultiCategorize` or on its own if you have one-hot encoded targets (pass a `vocab` for decoding and `do_encode=False` in this case)",
"_____no_output_____"
]
],
[
[
"_tfm = OneHotEncode(c=3)\ntest_eq(_tfm([0,2]), tensor([1.,0,1]))\ntest_eq(_tfm.decode(tensor([0,1,1])), [1,2])",
"_____no_output_____"
],
[
"tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(), OneHotEncode()]])\ntest_eq(tds[1], [tensor([1.,0,0])])\ntest_eq(tds[3], [tensor([0.,0,0])])\ntest_eq(tds.decode([tensor([False, True, True])]), [['b','c']])\ntest_eq(type(tds[1][0]), TensorMultiCategory)\ntest_stdout(lambda: show_at(tds,2), 'a;c')",
"_____no_output_____"
],
[
"#hide\n#test with passing the vocab\ntds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(vocab=['a', 'b', 'c']), OneHotEncode()]])\ntest_eq(tds[1], [tensor([1.,0,0])])\ntest_eq(tds[3], [tensor([0.,0,0])])\ntest_eq(tds.decode([tensor([False, True, True])]), [['b','c']])\ntest_eq(type(tds[1][0]), TensorMultiCategory)\ntest_stdout(lambda: show_at(tds,2), 'a;c')",
"_____no_output_____"
],
[
"# export\nclass EncodedMultiCategorize(Categorize):\n \"Transform of one-hot encoded multi-category that decodes with `vocab`\"\n loss_func,order=BCEWithLogitsLossFlat(),1\n def __init__(self, vocab):\n super().__init__(vocab, sort=vocab==None)\n self.c = len(vocab)\n def encodes(self, o): return TensorMultiCategory(tensor(o).float())\n def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))",
"_____no_output_____"
],
[
"_tfm = EncodedMultiCategorize(vocab=['a', 'b', 'c'])\ntest_eq(_tfm([1,0,1]), tensor([1., 0., 1.]))\ntest_eq(type(_tfm([1,0,1])), TensorMultiCategory)\ntest_eq(_tfm.decode(tensor([False, True, True])), ['b','c'])\n\n_tfm2 = EncodedMultiCategorize(vocab=['c', 'b', 'a'])\ntest_eq(_tfm2.vocab, ['c', 'b', 'a'])",
"_____no_output_____"
],
[
"#export\nclass RegressionSetup(DisplayedTransform):\n \"Transform that floatifies targets\"\n loss_func=MSELossFlat()\n def __init__(self, c=None): store_attr()\n\n def encodes(self, o): return tensor(o).float()\n def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)\n def setups(self, dsets):\n if self.c is not None: return\n try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1\n except: self.c = 0",
"_____no_output_____"
],
[
"_tfm = RegressionSetup()\ndsets = Datasets([0, 1, 2], RegressionSetup)\ntest_eq(dsets.c, 1)\ntest_eq_type(dsets[0], (tensor(0.),))\n\ndsets = Datasets([[0, 1, 2], [3,4,5]], RegressionSetup)\ntest_eq(dsets.c, 3)\ntest_eq_type(dsets[0], (tensor([0.,1.,2.]),))",
"_____no_output_____"
],
[
"#export\ndef get_c(dls):\n if getattr(dls, 'c', False): return dls.c\n if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c\n if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c\n vocab = getattr(dls, 'vocab', [])\n if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]\n return len(vocab)",
"_____no_output_____"
]
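,
[
"# Hedged illustration (added, not from the original notebook): `get_c` checks for a\n# class count in a few places and falls back to the vocab length; stand-in objects\n# are enough to show the lookup order.\nfrom types import SimpleNamespace\ntest_eq(get_c(SimpleNamespace(c=3)), 3)\ndls_like = SimpleNamespace(train=SimpleNamespace(after_item=None, after_batch=None), vocab=['a','b'])\ntest_eq(get_c(dls_like), 2)",
"_____no_output_____"
]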
],
[
[
"## End-to-end dataset example with MNIST",
"_____no_output_____"
],
[
"Let's show how to use those functions to grab the mnist dataset in a `Datasets`. First we grab all the images.",
"_____no_output_____"
]
],
[
[
"path = untar_data(URLs.MNIST_TINY)\nitems = get_image_files(path)",
"_____no_output_____"
]
],
[
[
"Then we split between train and validation depending on the folder.",
"_____no_output_____"
]
],
[
[
"splitter = GrandparentSplitter()\nsplits = splitter(items)\ntrain,valid = (items[i] for i in splits)\ntrain[:3],valid[:3]",
"_____no_output_____"
]
],
[
[
"Our inputs are images that we open and convert to tensors, our targets are labeled depending on the parent directory and are categories.",
"_____no_output_____"
]
],
[
[
"from PIL import Image",
"_____no_output_____"
],
[
"def open_img(fn:Path): return Image.open(fn).copy()\ndef img2tensor(im:Image.Image): return TensorImage(array(im)[None])\n\ntfms = [[open_img, img2tensor],\n [parent_label, Categorize()]]\ntrain_ds = Datasets(train, tfms)",
"_____no_output_____"
],
[
"x,y = train_ds[3]\nxd,yd = decode_at(train_ds,3)\ntest_eq(parent_label(train[3]),yd)\ntest_eq(array(Image.open(train[3])),xd[0].numpy())",
"_____no_output_____"
],
[
"ax = show_at(train_ds, 3, cmap=\"Greys\", figsize=(1,1))",
"_____no_output_____"
],
[
"assert ax.title.get_text() in ('3','7')\ntest_fig_exists(ax)",
"_____no_output_____"
]
],
[
[
"## ToTensor -",
"_____no_output_____"
]
],
[
[
"#export\nclass ToTensor(Transform):\n \"Convert item to appropriate tensor class\"\n order = 5",
"_____no_output_____"
]
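,
[
"# Hedged sketch (added, not from the original notebook): `ToTensor` carries no\n# `encodes` itself; concrete conversions are registered elsewhere via type dispatch\n# (e.g. for PIL images in the vision module). A toy registration for plain ints:\n@ToTensor\ndef encodes(self, o:int): return tensor(o)\ntest_eq(ToTensor()(2), tensor(2))",
"_____no_output_____"
]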
],
[
[
"## IntToFloatTensor -",
"_____no_output_____"
]
],
[
[
"# export\nclass IntToFloatTensor(DisplayedTransform):\n \"Transform image to float tensor, optionally dividing by 255 (e.g. for images).\"\n order = 10 #Need to run after PIL transforms on the GPU\n def __init__(self, div=255., div_mask=1): store_attr()\n def encodes(self, o:TensorImage): return o.float().div_(self.div)\n def encodes(self, o:TensorMask ): return o.long() // self.div_mask\n def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o",
"_____no_output_____"
],
[
"t = (TensorImage(tensor(1)),tensor(2).long(),TensorMask(tensor(3)))\ntfm = IntToFloatTensor()\nft = tfm(t)\ntest_eq(ft, [1./255, 2, 3])\ntest_eq(type(ft[0]), TensorImage)\ntest_eq(type(ft[2]), TensorMask)\ntest_eq(ft[0].type(),'torch.FloatTensor')\ntest_eq(ft[1].type(),'torch.LongTensor')\ntest_eq(ft[2].type(),'torch.LongTensor')",
"_____no_output_____"
]
],
[
[
"## Normalization -",
"_____no_output_____"
]
],
[
[
"# export\ndef broadcast_vec(dim, ndim, *t, cuda=True):\n \"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes\"\n v = [1]*ndim\n v[dim] = -1\n f = to_device if cuda else noop\n return [f(tensor(o).view(*v)) for o in t]",
"_____no_output_____"
],
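[
"# Quick check (added, not from the original notebook): a length-3 vector made\n# broadcastable over dim 1 of a 4-dim batch gets shape (1, 3, 1, 1).\nm,s = broadcast_vec(1, 4, [0.5]*3, [0.5]*3, cuda=False)\ntest_eq(m.shape, (1, 3, 1, 1))\ntest_eq(s.shape, (1, 3, 1, 1))",
"_____no_output_____"
],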
[
"# export\n@docs\nclass Normalize(DisplayedTransform):\n \"Normalize/denorm batch of `TensorImage`\"\n parameters,order = L('mean', 'std'),99\n def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()\n\n @classmethod\n def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))\n\n def setups(self, dl:DataLoader):\n if self.mean is None or self.std is None:\n x,*_ = dl.one_batch()\n self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7\n\n def encodes(self, x:TensorImage): return (x-self.mean) / self.std\n def decodes(self, x:TensorImage):\n f = to_cpu if x.device.type=='cpu' else noop\n return (x*f(self.std) + f(self.mean))\n\n _docs=dict(encodes=\"Normalize batch\", decodes=\"Denormalize batch\")",
"_____no_output_____"
],
[
"mean,std = [0.5]*3,[0.5]*3\nmean,std = broadcast_vec(1, 4, mean, std)\nbatch_tfms = [IntToFloatTensor(), Normalize.from_stats(mean,std)]\ntdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4, device=default_device())",
"_____no_output_____"
],
[
"x,y = tdl.one_batch()\nxd,yd = tdl.decode((x,y))\n\ntest_eq(x.type(), 'torch.cuda.FloatTensor' if default_device().type=='cuda' else 'torch.FloatTensor')\ntest_eq(xd.type(), 'torch.LongTensor')\ntest_eq(type(x), TensorImage)\ntest_eq(type(y), TensorCategory)\nassert x.mean()<0.0\nassert x.std()>0.5\nassert 0<xd.float().mean()/255.<1\nassert 0<xd.float().std()/255.<0.5",
"_____no_output_____"
],
[
"#hide\nnrm = Normalize()\nbatch_tfms = [IntToFloatTensor(), nrm]\ntdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4)\nx,y = tdl.one_batch()\ntest_close(x.mean(), 0.0, 1e-4)\nassert x.std()>0.9, x.std()",
"_____no_output_____"
],
[
"#Just for visuals\nfrom fastai.vision.core import *",
"_____no_output_____"
],
[
"tdl.show_batch((x,y))",
"_____no_output_____"
],
[
"#hide\nx,y = cast(x,Tensor),cast(y,Tensor) #Lose type of tensors (to emulate predictions)\ntest_ne(type(x), TensorImage)\ntdl.show_batch((x,y), figsize=(1,1)) #Check that types are put back by dl.",
"_____no_output_____"
]
],
[
[
"## Export -",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.export import notebook2script\nnotebook2script()",
"Converted 00_torch_core.ipynb.\nConverted 01_layers.ipynb.\nConverted 01a_losses.ipynb.\nConverted 02_data.load.ipynb.\nConverted 03_data.core.ipynb.\nConverted 04_data.external.ipynb.\nConverted 05_data.transforms.ipynb.\nConverted 06_data.block.ipynb.\nConverted 07_vision.core.ipynb.\nConverted 08_vision.data.ipynb.\nConverted 09_vision.augment.ipynb.\nConverted 09b_vision.utils.ipynb.\nConverted 09c_vision.widgets.ipynb.\nConverted 10_tutorial.pets.ipynb.\nConverted 10b_tutorial.albumentations.ipynb.\nConverted 11_vision.models.xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_callback.core.ipynb.\nConverted 13a_learner.ipynb.\nConverted 13b_metrics.ipynb.\nConverted 14_callback.schedule.ipynb.\nConverted 14a_callback.data.ipynb.\nConverted 15_callback.hook.ipynb.\nConverted 15a_vision.models.unet.ipynb.\nConverted 16_callback.progress.ipynb.\nConverted 17_callback.tracker.ipynb.\nConverted 18_callback.fp16.ipynb.\nConverted 18a_callback.training.ipynb.\nConverted 18b_callback.preds.ipynb.\nConverted 19_callback.mixup.ipynb.\nConverted 20_interpret.ipynb.\nConverted 20a_distributed.ipynb.\nConverted 21_vision.learner.ipynb.\nConverted 22_tutorial.imagenette.ipynb.\nConverted 23_tutorial.vision.ipynb.\nConverted 24_tutorial.siamese.ipynb.\nConverted 24_vision.gan.ipynb.\nConverted 30_text.core.ipynb.\nConverted 31_text.data.ipynb.\nConverted 32_text.models.awdlstm.ipynb.\nConverted 33_text.models.core.ipynb.\nConverted 34_callback.rnn.ipynb.\nConverted 35_tutorial.wikitext.ipynb.\nConverted 36_text.models.qrnn.ipynb.\nConverted 37_text.learner.ipynb.\nConverted 38_tutorial.text.ipynb.\nConverted 39_tutorial.transformers.ipynb.\nConverted 40_tabular.core.ipynb.\nConverted 41_tabular.data.ipynb.\nConverted 42_tabular.model.ipynb.\nConverted 43_tabular.learner.ipynb.\nConverted 44_tutorial.tabular.ipynb.\nConverted 45_collab.ipynb.\nConverted 46_tutorial.collab.ipynb.\nConverted 50_tutorial.datablock.ipynb.\nConverted 60_medical.imaging.ipynb.\nConverted 61_tutorial.medical_imaging.ipynb.\nConverted 65_medical.text.ipynb.\nConverted 70_callback.wandb.ipynb.\nConverted 71_callback.tensorboard.ipynb.\nConverted 72_callback.neptune.ipynb.\nConverted 73_callback.captum.ipynb.\nConverted 74_callback.cutmix.ipynb.\nConverted 97_test_utils.ipynb.\nConverted 99_pytorch_doc.ipynb.\nConverted dev-setup.ipynb.\nConverted index.ipynb.\nConverted quick_start.ipynb.\nConverted tutorial.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3c8f525f0fd6f91b0acfe539be9e44e0f9f70 | 734,448 | ipynb | Jupyter Notebook | examples/webinars_conferences_etc/multi_lingual_webinar/4_Unsupervise_Chinese_Keyword_Extraction_NER_and_Translation_from_Chinese_News.ipynb | UPbook-innovations/nlu | 2ae02ce7b6ca163f47271e98b71de109d38adefe | [
"Apache-2.0"
] | 1 | 2021-05-01T01:23:18.000Z | 2021-05-01T01:23:18.000Z | examples/webinars_conferences_etc/multi_lingual_webinar/4_Unsupervise_Chinese_Keyword_Extraction_NER_and_Translation_from_Chinese_News.ipynb | sheerinZ/nlu | a223eee4b077a6b832f47e5e6125167fe0922687 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:55:05.000Z | 2022-02-26T11:16:21.000Z | examples/webinars_conferences_etc/multi_lingual_webinar/4_Unsupervise_Chinese_Keyword_Extraction_NER_and_Translation_from_Chinese_News.ipynb | atdavidpark/nlu | 619d07299e993323d83086c86506db71e2a139a9 | [
"Apache-2.0"
] | 1 | 2021-09-13T10:06:20.000Z | 2021-09-13T10:06:20.000Z | 734,448 | 734,448 | 0.915942 | [
[
[
"\n\n[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/webinars_conferences_etc/multi_lingual_webinar/4_Unsupervise_Chinese_Keyword_Extraction_NER_and_Translation_from_Chinese_News.ipynb)\n\n",
"_____no_output_____"
]
],
[
[
"import os\n! apt-get update -qq > /dev/null \n# Install java\n! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"PATH\"] = os.environ[\"JAVA_HOME\"] + \"/bin:\" + os.environ[\"PATH\"]\n! pip install nlu pyspark==2.4.4 > /dev/null \nimport nlu \nimport pandas as pd \n! wget http://ckl-it.de/wp-content/uploads/2021/02/chinese_news.csv",
"--2021-02-17 12:04:57-- http://ckl-it.de/wp-content/uploads/2021/02/chinese_news.csv\nResolving ckl-it.de (ckl-it.de)... 217.160.0.108, 2001:8d8:100f:f000::209\nConnecting to ckl-it.de (ckl-it.de)|217.160.0.108|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 21914526 (21M) [text/csv]\nSaving to: ‘chinese_news.csv’\n\nchinese_news.csv 100%[===================>] 20.90M 6.54MB/s in 3.5s \n\n2021-02-17 12:05:01 (5.99 MB/s) - ‘chinese_news.csv’ saved [21914526/21914526]\n\n"
]
],
[
[
"# Analyzing chinese News Articles With NLU\n## This notebook showcases how to extract Chinese Keywords Unsupervied with YAKE and Named Entities and translate them to English\n### In addition, we will leverage the Chinese WordSegmenter and Lemmatizer to preprocess our data further and get a better view fof our data distribution\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# [Chinese official daily news](https://www.kaggle.com/noxmoon/chinese-official-daily-news-since-2016)\n\n### Xinwen Lianbo is a daily news programme produced by China Central Television. It is shown simultaneously by all local TV stations in mainland China, making it one of the world's most-watched programmes. It has been broadcast since 1 January 1978.\nwikipedia\n\n",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('./chinese_news.csv')\ndf",
"_____no_output_____"
]
],
[
[
"# Depending how we pre-process our text, we will get different keywords extracted with YAKE. In This tutorial we will see the effect of **Lemmatization** and **Word Segmentation** and see how the distribution of Keywords changes \n- Lemmatization\n- Word Segmentation",
"_____no_output_____"
],
[
"# Apply YAKE - Keyword Extractor to the raw text\nFirst we do no pre-processing at all and just calculate keywords from the raw titles with YAKE",
"_____no_output_____"
]
],
[
[
"yake_df = nlu.load('yake').predict(df.headline)\nyake_df",
"_____no_output_____"
]
],
[
[
"## The predicted Chinese Keywords dont show up on Pandas Label and you probably do not speek Chinese!\n### This is why we will translate each extracted Keyword into english and then take a look at the distribution again",
"_____no_output_____"
]
],
[
[
"yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100].plot.bar(title='Top 100 in Chinese News Articles. No Chinese Keywords :( So lets translate!', figsize=(20,8))",
"_____no_output_____"
]
],
[
[
"### We get the top 100 keywords and store the counts toegether with the keywords in a new DF",
"_____no_output_____"
]
],
[
[
"top_100_zh = yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100]\ntop_100_zh = pd.DataFrame(top_100_zh)\n# Create new DF from the counts\ntop_100_zh['zh'] = top_100_zh.index\ntop_100_zh.reset_index(inplace=True)\ntop_100_zh\n",
"_____no_output_____"
]
],
[
[
"### Now we can just translate each predicted keyword with `zh.translate_to.en` in 1 line of code and see what is actually going on in the dataset",
"_____no_output_____"
]
],
[
[
"top_100_en = nlu.load('zh.translate_to.en').predict(top_100_zh.zh)\ntop_100_en",
"translate_zh_en download started this may take some time.\nApprox size to download 396.8 MB\n[OK!]\n"
]
],
[
[
"#### Write the translations into the df with the Keyword counts so we can plot them together in the next step",
"_____no_output_____"
]
],
[
[
"# Write translation back to the keyword df with the counts\ntop_100_zh['en']= top_100_en.translation\ntop_100_zh",
"_____no_output_____"
]
],
[
[
"## Now we can simply look at every keyword as a bar chart with the actual translation of it and understand what keywordsa ppeared in chinese news!",
"_____no_output_____"
]
],
[
[
"top_100_zh.index = top_100_zh.en\ntop_100_zh.keywords_classes.plot.barh(figsize=(20,20), title='Distribution of top 100 translated chinese News Articles generated by YAKE alogirthm applied to RAW data')",
"_____no_output_____"
]
],
[
[
"# Apply Yake to Segmented/Tokenized data\nWe gave the YAKE algorithm full heatlines which where not segmented. To better understand the Chinese text ,we can segment it into token and analyze their occurcence instead\n## YAKE + Word Segmentation",
"_____no_output_____"
]
],
[
[
"# Segment words into tokenz with the word segmenter\n# This will output 1 row per token\nseg_df = nlu.load('zh.segment_words').predict(df.headline)\nseg_df ",
"wordseg_weibo download started this may take some time.\nApproximate size to download 1.2 MB\n[OK!]\n"
]
],
[
[
"### Join the tokens back as white space seperated strings for the Yake Keyword extraction in the next step",
"_____no_output_____"
]
],
[
[
"# Join the tokens back as white space seperated strings\njoined_segs = seg_df.token.groupby(seg_df.index).transform(lambda x : ' '.join(x)).drop_duplicates()\njoined_segs",
"_____no_output_____"
]
],
[
[
"### Now we can extract keywords with yake on the whitespace seperated tokens \n",
"_____no_output_____"
]
],
[
[
"seg_yake_df = nlu.load('yake').predict(joined_segs)\nseg_yake_df",
"_____no_output_____"
],
[
"# Get top 100 occoring Keywords from the joined segmented tokens\ntop_100_seg_zh = seg_yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100]#.plot.bar(title='Top 100 in Chinese News Articles Segmented', figsize=(20,8))\ntop_100_seg_zh = pd.DataFrame(top_100_seg_zh )\ntop_100_seg_zh",
"_____no_output_____"
]
],
[
[
"## Get top 100 keywords and Translate them like we did for the raw Data as data preperation for the visualization of the keyword distribution",
"_____no_output_____"
]
],
[
[
"# Create new DF from the counts\ntop_100_seg_zh['zh'] = top_100_seg_zh.index\ntop_100_seg_zh.reset_index(inplace=True)\n# Write Translations back to df with keyword counts\n\ntop_100_seg_zh['en'] = nlu.load('zh.translate_to.en').predict(top_100_seg_zh.zh).translation",
"translate_zh_en download started this may take some time.\nApprox size to download 396.8 MB\n[OK!]\n"
]
],
[
[
"### Visualize the distirbution of the Keywords extracted from the segmented tokens\nWe can observe that we now have a very different distribution than originally",
"_____no_output_____"
]
],
[
[
"top_100_seg_zh.index = top_100_seg_zh.en\ntop_100_seg_zh.keywords_classes.plot.barh(figsize=(20,20), title = 'Segmented Keywords YAKE Distribution')",
"_____no_output_____"
]
],
[
[
"# Apply Yake to Segmented and Lemmatized data",
"_____no_output_____"
]
],
[
[
"# Automated Word Segmentation Included!\nzh_lem_df = nlu.load('zh.lemma').predict(df.headline)\nzh_lem_df",
"lemma download started this may take some time.\nApproximate size to download 149.5 KB\n[OK!]\nwordseg_weibo download started this may take some time.\nApproximate size to download 1.2 MB\n[OK!]\n"
]
],
[
[
"## Join tokens into whitespace seperated string like we did previosuly for Word Segmentation",
"_____no_output_____"
]
],
[
[
"zh_lem_df['lem_str'] = zh_lem_df.lemma.str.join(' ')\nzh_lem_df",
"_____no_output_____"
]
],
[
[
"## Extract Keywords on Stemmed + Word Segmented Chinese text",
"_____no_output_____"
]
],
[
[
"yake_lem_df = nlu.load('yake').predict(zh_lem_df.lem_str)\nyake_lem_df",
"_____no_output_____"
],
[
"top_100_stem = yake_lem_df.explode('keywords_classes').keywords_classes.value_counts()[:100]\ntop_100_stem = pd.DataFrame(top_100_stem)\n# Create new DF from the counts\ntop_100_stem['zh'] = top_100_stem.index\ntop_100_stem.reset_index(inplace=True)\n# Write Translations back to df with keyword counts\n\ntop_100_stem['en'] = nlu.load('zh.translate_to.en').predict(top_100_stem.zh).translation\ntop_100_stem",
"translate_zh_en download started this may take some time.\nApprox size to download 396.8 MB\n[OK!]\n"
]
],
[
[
"# Plot the Segmented and Lemmatized Distribution of extracted keywords ",
"_____no_output_____"
]
],
[
[
"top_100_stem.index = top_100_stem.en\ntop_100_stem.keywords_classes.plot.barh(figsize=(20,20), title='Distribution of top 100 translated chinese News Artzzzicles generated by YAKE alogirthm applied to Lemmatized and Segmented Chinese Text')",
"_____no_output_____"
]
],
[
[
"# Extract Chinese Named entities",
"_____no_output_____"
]
],
[
[
"zh_ner_df = nlu.load('zh.ner').predict(df.iloc[:1000].headline, output_level='document')\nzh_ner_df",
"ner_msra_bert_768d download started this may take some time.\nApproximate size to download 19.2 MB\n[OK!]\nbert_base_chinese download started this may take some time.\nApproximate size to download 367.6 MB\n[OK!]\nwordseg_weibo download started this may take some time.\nApproximate size to download 1.2 MB\n[OK!]\n"
],
[
"# Translate Detected Chinese Entities to English\nen_entities = nlu.load('zh.translate_to.en').predict(zh_ner_df.explode('entities').entities)\nen_entities",
"translate_zh_en download started this may take some time.\nApprox size to download 396.8 MB\n[OK!]\n"
],
[
"en_entities.translation.value_counts()[0:100].plot.barh(figsize=(20,20), title = \"Top 100 Translated detected Named entities\")",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"# There are many more models!\n## Checkout [the Modelshub](https://nlp.johnsnowlabs.com/models) and the [NLU Namespace](https://nlu.johnsnowlabs.com/docs/en/namespace) for more models",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0d3dbed5bccff23f0deeb9249ab447bb9fd276f | 9,342 | ipynb | Jupyter Notebook | 3.2_Simple-Scatter-Plots.ipynb | anilkumarpanda/python-training-2021 | df586c999d8952be9c0ef3df2adf7b4b8125daa1 | [
"MIT"
] | null | null | null | 3.2_Simple-Scatter-Plots.ipynb | anilkumarpanda/python-training-2021 | df586c999d8952be9c0ef3df2adf7b4b8125daa1 | [
"MIT"
] | 1 | 2021-01-19T13:01:44.000Z | 2021-01-21T08:39:40.000Z | 3.2_Simple-Scatter-Plots.ipynb | anilkumarpanda/python-training-2021 | df586c999d8952be9c0ef3df2adf7b4b8125daa1 | [
"MIT"
] | 1 | 2021-01-17T10:06:48.000Z | 2021-01-17T10:06:48.000Z | 32.894366 | 487 | 0.611218 | [
[
[
"# Simple Scatter Plots",
"_____no_output_____"
],
[
"Another commonly used plot type is the simple scatter plot, a close cousin of the line plot.\nInstead of points being joined by line segments, here the points are represented individually with a dot, circle, or other shape.\nWe’ll start by setting up the notebook for plotting and importing the functions we will use:",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Scatter Plots with ``plt.plot``\n\nIn the previous section we looked at ``plt.plot``/``ax.plot`` to produce line plots.\nIt turns out that this same function can produce scatter plots as well:",
"_____no_output_____"
]
],
[
[
"x = np.linspace(0, 10, 30)\ny = np.sin(x)\n\nplt.plot(x, y, 'o', color='black');",
"_____no_output_____"
]
],
[
[
"The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here:",
"_____no_output_____"
]
],
[
[
"rng = np.random.RandomState(0)\nfor marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']:\n plt.plot(rng.rand(5), rng.rand(5), marker,\n label=\"marker='{0}'\".format(marker))\nplt.legend(numpoints=1)\nplt.xlim(0, 1.8);",
"_____no_output_____"
]
],
[
[
"For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them:",
"_____no_output_____"
]
],
[
[
"plt.plot(x, y, '-ok');",
"_____no_output_____"
]
],
[
[
"Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers:",
"_____no_output_____"
]
],
[
[
"plt.plot(x, y, '-p', color='gray',\n markersize=15, linewidth=4,\n markerfacecolor='white',\n markeredgecolor='gray',\n markeredgewidth=2)\nplt.ylim(-1.2, 1.2);",
"_____no_output_____"
]
],
[
[
"This type of flexibility in the ``plt.plot`` function allows for a wide variety of possible visualization options.\nFor a full description of the options available, refer to the ``plt.plot`` documentation.",
"_____no_output_____"
],
[
"## Scatter Plots with ``plt.scatter``\n\nA second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function:",
"_____no_output_____"
]
],
[
[
"plt.scatter(x, y, marker='o');",
"_____no_output_____"
]
],
[
[
"The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.\n\nLet's show this by creating a random scatter plot with points of many colors and sizes.\nIn order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level:",
"_____no_output_____"
]
],
[
[
"rng = np.random.RandomState(0)\nx = rng.randn(100)\ny = rng.randn(100)\ncolors = rng.rand(100)\nsizes = 1000 * rng.rand(100)\n\nplt.scatter(x, y, c=colors, s=sizes, alpha=0.3,\n cmap='viridis')\nplt.colorbar(); # show color scale",
"_____no_output_____"
]
],
[
[
"Notice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels.\nIn this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data.\n\nFor example, we might use the Iris data from Scikit-Learn, where each sample is one of three types of flowers that has had the size of its petals and sepals carefully measured:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\niris = load_iris()\nfeatures = iris.data.T\n\nplt.scatter(features[0], features[1], alpha=0.2,\n s=100*features[3], c=iris.target, cmap='viridis')\nplt.xlabel(iris.feature_names[0])\nplt.ylabel(iris.feature_names[1]);",
"_____no_output_____"
]
],
[
[
"We can see that this scatter plot has given us the ability to simultaneously explore four different dimensions of the data:\nthe (x, y) location of each point corresponds to the sepal length and width, the size of the point is related to the petal width, and the color is related to the particular species of flower.\nMulticolor and multifeature scatter plots like this can be useful for both exploration and presentation of data.",
"_____no_output_____"
],
[
"## ``plot`` Versus ``scatter``: A Note on Efficiency\n\nAside from the different features available in ``plt.plot`` and ``plt.scatter``, why might you choose to use one over the other? While it doesn't matter as much for small amounts of data, as datasets get larger than a few thousand points, ``plt.plot`` can be noticeably more efficient than ``plt.scatter``.\nThe reason is that ``plt.scatter`` has the capability to render a different size and/or color for each point, so the renderer must do the extra work of constructing each point individually.\nIn ``plt.plot``, on the other hand, the points are always essentially clones of each other, so the work of determining the appearance of the points is done only once for the entire set of data.\nFor large datasets, the difference between these two can lead to vastly different performance, and for this reason, ``plt.plot`` should be preferred over ``plt.scatter`` for large datasets.",
"_____no_output_____"
],
[
"\n*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*\n\n*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*",
"_____no_output_____"
]
]
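,
[
[
"# A rough, hedged sketch (added, not from the original text): time both functions on\n# a larger dataset yourself; exact numbers depend on your machine and backend.\nfrom timeit import timeit\n\nx_big = rng.randn(10000)\ny_big = rng.randn(10000)\n\nt_plot = timeit(lambda: plt.plot(x_big, y_big, 'o'), number=3)\nt_scatter = timeit(lambda: plt.scatter(x_big, y_big), number=3)\nplt.close('all')  # discard the throwaway figures\nprint(\"plt.plot:    {:.3f} s\".format(t_plot))\nprint(\"plt.scatter: {:.3f} s\".format(t_scatter))",
"_____no_output_____"
]
]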
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
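,
[
"code"
]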
] |
d0d3dd9d1dbe4f9dd2ef02bddbfb364ed05ddaa0 | 119,073 | ipynb | Jupyter Notebook | HousePricePrediction.ipynb | pallabi-sahoo/Pytorch | b4f164cdd3ecece6f1c589b5004199290d4ea618 | [
"MIT"
] | null | null | null | HousePricePrediction.ipynb | pallabi-sahoo/Pytorch | b4f164cdd3ecece6f1c589b5004199290d4ea618 | [
"MIT"
] | null | null | null | HousePricePrediction.ipynb | pallabi-sahoo/Pytorch | b4f164cdd3ecece6f1c589b5004199290d4ea618 | [
"MIT"
] | null | null | null | 37.586174 | 16,708 | 0.517834 | [
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"df=pd.read_csv('houseprice.csv',usecols=[\"SalePrice\",\"MSSubClass\",\"MSZoning\",\"LotFrontage\",\"LotArea\",\n \"Street\",\"YearBuilt\",\"LotShape\",\"1stFlrSF\",\"2ndFlrSF\"]).dropna()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1201 entries, 0 to 1459\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MSSubClass 1201 non-null int64 \n 1 MSZoning 1201 non-null object \n 2 LotFrontage 1201 non-null float64\n 3 LotArea 1201 non-null int64 \n 4 Street 1201 non-null object \n 5 LotShape 1201 non-null object \n 6 YearBuilt 1201 non-null int64 \n 7 1stFlrSF 1201 non-null int64 \n 8 2ndFlrSF 1201 non-null int64 \n 9 SalePrice 1201 non-null int64 \ndtypes: float64(1), int64(6), object(3)\nmemory usage: 103.2+ KB\n"
],
[
"for i in df.columns:\n print(\"Column name{} and unique values are {}\".format(i,len(df[i].unique())))",
"Column nameMSSubClass and unique values are 15\nColumn nameMSZoning and unique values are 5\nColumn nameLotFrontage and unique values are 110\nColumn nameLotArea and unique values are 869\nColumn nameStreet and unique values are 2\nColumn nameLotShape and unique values are 4\nColumn nameYearBuilt and unique values are 112\nColumn name1stFlrSF and unique values are 678\nColumn name2ndFlrSF and unique values are 368\nColumn nameSalePrice and unique values are 597\n"
],
[
"import datetime\ndatetime.datetime.now().year",
"_____no_output_____"
],
[
"df['Total Years']=datetime.datetime.now().year-df['YearBuilt']",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.drop('YearBuilt',axis=1,inplace=True)",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"##Creating Categorical Features\ncat_features=['MSSubClass','MSZoning','Street','LotShape']\nout_features='SalePrice'",
"_____no_output_____"
],
[
"df['MSSubClass'].unique()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import LabelEncoder\nlbl_encoders={}\nlbl_encoders['MSSubClass']=LabelEncoder()\nlbl_encoders[\"MSSubClass\"].fit_transform(df['MSSubClass'])",
"_____no_output_____"
],
[
"lbl_encoders",
"_____no_output_____"
],
[
"label_encoder={}\nfor feature in cat_features:\n label_encoder[feature]=LabelEncoder()\n df[feature]=label_encoder[feature].fit_transform(df[feature])",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"##convertto numpy\ncat_features=df[[\"MSSubClass\",\"MSZoning\",\"Street\",\"LotShape\"]].to_numpy()\ncat_features",
"_____no_output_____"
],
[
"## convert numpy to Tensors\nimport torch\ncat_features=torch.tensor(cat_features,dtype=torch.int64)\ncat_features",
"_____no_output_____"
],
[
"### create continuous variable\ncont_features=[]\nfor i in df.columns:\n if i in [\"MSSubClass\",\"MSZoning\",\"Street\",\"LotShape\",\"SalePrice\"]:\n pass\n else:\n cont_features.append(i)",
"_____no_output_____"
],
[
"cont_features",
"_____no_output_____"
],
[
"cont_values=np.stack([df[i].values for i in cont_features],axis=1)\ncont_values=torch.tensor(cont_values,dtype=torch.float)\ncont_values",
"_____no_output_____"
],
[
"cont_values.dtype",
"_____no_output_____"
],
[
"### Dependent Feature\ny=torch.tensor(df['SalePrice'].values,dtype=torch.float).reshape(-1,1)\ny",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1201 entries, 0 to 1459\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MSSubClass 1201 non-null int64 \n 1 MSZoning 1201 non-null int32 \n 2 LotFrontage 1201 non-null float64\n 3 LotArea 1201 non-null int64 \n 4 Street 1201 non-null int32 \n 5 LotShape 1201 non-null int32 \n 6 1stFlrSF 1201 non-null int64 \n 7 2ndFlrSF 1201 non-null int64 \n 8 SalePrice 1201 non-null int64 \n 9 Total Years 1201 non-null int64 \ndtypes: float64(1), int32(3), int64(6)\nmemory usage: 89.1 KB\n"
],
[
"cat_features.shape,cont_values.shape,y.shape",
"_____no_output_____"
],
[
"len(df['MSSubClass'].unique())",
"_____no_output_____"
],
[
"##Embedding size for categorical columns\ncat_dims=[len(df[col].unique()) for col in [\"MSSubClass\",\"MSZoning\",\"Street\",\"LotShape\"]]",
"_____no_output_____"
],
[
"cat_dims",
"_____no_output_____"
],
[
"## output dimension should be setbased on the input dimension(min(50,features dimension /2))\nembedding_dim=[(x,min(50,(x+1)//2)) for x in cat_dims]",
"_____no_output_____"
],
[
"embedding_dim",
"_____no_output_____"
],
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nembed_representation=nn.ModuleList([nn.Embedding(inp,out) for inp,out in embedding_dim])\nembed_representation",
"_____no_output_____"
],
[
"cat_features",
"_____no_output_____"
],
[
"cat_featuresz=cat_features[:4]\ncat_featuresz",
"_____no_output_____"
],
[
"pd.set_option('display.max_rows',500)\nembedding_val=[]\nfor i, e in enumerate(embed_representation):\n embedding_val.append(e(cat_features[:,i]))",
"_____no_output_____"
],
[
"embedding_val",
"_____no_output_____"
],
[
"z=torch.cat(embedding_val,1)\nz",
"_____no_output_____"
],
[
"##implement Dropout\ndropout=nn.Dropout(.4)",
"_____no_output_____"
],
[
"final_embedded=dropout(z)\nfinal_embedded",
"_____no_output_____"
],
[
"##create a Feed Forward Neural Network\nclass FeedForwardNN(nn.Module):\n \n def __init__(self,embedding_dim,n_cont,out_sz,layers,p=0.5):\n super().__init__()\n self.embeds=nn.ModuleList([nn.Embedding(inp,out) for inp,out in embedding_dim])\n self.emb_drop=nn.Dropout(p)\n self.bn_cont=nn.BatchNorm1d(n_cont)\n\n layerlist=[]\n n_emb=sum((out for inp,out in embedding_dim))\n n_in=n_emb+n_cont\n \n for i in layers:\n layerlist.append(nn.Linear(n_in,i))\n layerlist.append(nn.ReLU(inplace=True))\n layerlist.append(nn.BatchNorm1d(i))\n layerlist.append(nn.Dropout(p))\n n_in=i\n layerlist.append(nn.Linear(layers[-1],out_sz))\n \n self.layers=nn.Sequential(*layerlist)\n \n def forward(self,x_cat,x_cont):\n embeddings=[]\n for i,e in enumerate(self.embeds):\n embeddings.append(e(x_cat[:,i]))\n x=torch.cat(embeddings,1)\n x=self.emb_drop(x)\n \n x_cont=self.bn_cont(x_cont)\n x=torch.cat([x,x_cont],1)\n x=self.layers(x)\n return x",
"_____no_output_____"
],
[
"len(cont_features)",
"_____no_output_____"
],
[
"torch.manual_seed(100)\nmodel=FeedForwardNN(embedding_dim,len(cont_features),1,[100,50],p=0.4)",
"_____no_output_____"
],
[
"model",
"_____no_output_____"
]
],
[
[
"## Define Loss and Optimizer",
"_____no_output_____"
]
],
[
[
"model.parameters",
"_____no_output_____"
],
[
"loss_function=nn.MSELoss()\noptimizer=torch.optim.Adam(model.parameters(),lr=0.01)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"cont_values.shape",
"_____no_output_____"
],
[
"batch_size=1200\ntest_size=int(batch_size*0.15)\ntrain_categorical=cat_features[:batch_size-test_size]\ntest_categorical=cat_features[batch_size-test_size:batch_size]\ntrain_cont=cont_values[:batch_size-test_size]\ntest_cont=cont_values[batch_size-test_size:batch_size]\ny_train=y[:batch_size-test_size]\ny_test=y[batch_size-test_size:batch_size]",
"_____no_output_____"
],
[
"len(train_categorical),len(test_categorical),len(train_cont),len(test_cont),len(y_train),len(y_test)",
"_____no_output_____"
],
[
"epochs=5000\nfinal_losses=[]\nfor i in range(epochs):\n i=i+1\n y_pred=model(train_categorical,train_cont)\n loss=torch.sqrt(loss_function(y_pred,y_train))\n final_losses.append(loss)\n if i%10==1:\n print(\"Epoch number: {} and the loss : {}\".format(i,loss.item()))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()",
"Epoch number: 1 and the loss : 200496.75\nEpoch number: 11 and the loss : 200493.46875\nEpoch number: 21 and the loss : 200489.140625\nEpoch number: 31 and the loss : 200482.640625\nEpoch number: 41 and the loss : 200473.25\nEpoch number: 51 and the loss : 200461.375\nEpoch number: 61 and the loss : 200446.40625\nEpoch number: 71 and the loss : 200429.359375\nEpoch number: 81 and the loss : 200408.0\nEpoch number: 91 and the loss : 200383.421875\nEpoch number: 101 and the loss : 200355.3125\nEpoch number: 111 and the loss : 200322.125\nEpoch number: 121 and the loss : 200291.4375\nEpoch number: 131 and the loss : 200252.015625\nEpoch number: 141 and the loss : 200206.609375\nEpoch number: 151 and the loss : 200162.234375\nEpoch number: 161 and the loss : 200112.25\nEpoch number: 171 and the loss : 200059.6875\nEpoch number: 181 and the loss : 200005.875\nEpoch number: 191 and the loss : 199946.453125\nEpoch number: 201 and the loss : 199881.765625\nEpoch number: 211 and the loss : 199815.96875\nEpoch number: 221 and the loss : 199737.125\nEpoch number: 231 and the loss : 199669.828125\nEpoch number: 241 and the loss : 199589.453125\nEpoch number: 251 and the loss : 199505.96875\nEpoch number: 261 and the loss : 199411.140625\nEpoch number: 271 and the loss : 199323.6875\nEpoch number: 281 and the loss : 199243.84375\nEpoch number: 291 and the loss : 199139.546875\nEpoch number: 301 and the loss : 199027.109375\nEpoch number: 311 and the loss : 198931.171875\nEpoch number: 321 and the loss : 198845.578125\nEpoch number: 331 and the loss : 198694.15625\nEpoch number: 341 and the loss : 198602.921875\nEpoch number: 351 and the loss : 198496.609375\nEpoch number: 361 and the loss : 198384.71875\nEpoch number: 371 and the loss : 198244.578125\nEpoch number: 381 and the loss : 198103.6875\nEpoch number: 391 and the loss : 198014.3125\nEpoch number: 401 and the loss : 197882.171875\nEpoch number: 411 and the loss : 197728.40625\nEpoch number: 421 and the loss : 197593.546875\nEpoch number: 431 and the loss : 197421.71875\nEpoch number: 441 and the loss : 197283.5\nEpoch number: 451 and the loss : 197167.140625\nEpoch number: 461 and the loss : 196965.109375\nEpoch number: 471 and the loss : 196902.890625\nEpoch number: 481 and the loss : 196726.703125\nEpoch number: 491 and the loss : 196502.53125\nEpoch number: 501 and the loss : 196434.5625\nEpoch number: 511 and the loss : 196207.5625\nEpoch number: 521 and the loss : 196041.8125\nEpoch number: 531 and the loss : 195847.390625\nEpoch number: 541 and the loss : 195685.65625\nEpoch number: 551 and the loss : 195481.203125\nEpoch number: 561 and the loss : 195287.078125\nEpoch number: 571 and the loss : 195076.875\nEpoch number: 581 and the loss : 194886.75\nEpoch number: 591 and the loss : 194695.84375\nEpoch number: 601 and the loss : 194537.671875\nEpoch number: 611 and the loss : 194278.28125\nEpoch number: 621 and the loss : 194122.453125\nEpoch number: 631 and the loss : 193844.046875\nEpoch number: 641 and the loss : 193726.09375\nEpoch number: 651 and the loss : 193528.375\nEpoch number: 661 and the loss : 193225.171875\nEpoch number: 671 and the loss : 193063.796875\nEpoch number: 681 and the loss : 192771.765625\nEpoch number: 691 and the loss : 192491.546875\nEpoch number: 701 and the loss : 192303.90625\nEpoch number: 711 and the loss : 192205.125\nEpoch number: 721 and the loss : 191965.90625\nEpoch number: 731 and the loss : 191617.78125\nEpoch number: 741 and the loss : 191420.453125\nEpoch number: 751 and the loss : 
191194.0\nEpoch number: 761 and the loss : 190981.390625\nEpoch number: 771 and the loss : 190716.703125\nEpoch number: 781 and the loss : 190475.390625\nEpoch number: 791 and the loss : 190269.71875\nEpoch number: 801 and the loss : 189876.078125\nEpoch number: 811 and the loss : 189740.921875\nEpoch number: 821 and the loss : 189711.796875\nEpoch number: 831 and the loss : 189281.015625\nEpoch number: 841 and the loss : 188930.875\nEpoch number: 851 and the loss : 188831.015625\nEpoch number: 861 and the loss : 188390.46875\nEpoch number: 871 and the loss : 188181.171875\nEpoch number: 881 and the loss : 187885.390625\nEpoch number: 891 and the loss : 187557.796875\nEpoch number: 901 and the loss : 187448.015625\nEpoch number: 911 and the loss : 187241.71875\nEpoch number: 921 and the loss : 186812.484375\nEpoch number: 931 and the loss : 186371.515625\nEpoch number: 941 and the loss : 186132.859375\nEpoch number: 951 and the loss : 185940.5\nEpoch number: 961 and the loss : 185596.046875\nEpoch number: 971 and the loss : 185295.109375\nEpoch number: 981 and the loss : 185073.140625\nEpoch number: 991 and the loss : 184780.296875\nEpoch number: 1001 and the loss : 184386.28125\nEpoch number: 1011 and the loss : 184010.359375\nEpoch number: 1021 and the loss : 183798.15625\nEpoch number: 1031 and the loss : 183369.984375\nEpoch number: 1041 and the loss : 183216.15625\nEpoch number: 1051 and the loss : 182900.703125\nEpoch number: 1061 and the loss : 182713.1875\nEpoch number: 1071 and the loss : 182285.046875\nEpoch number: 1081 and the loss : 182078.3125\nEpoch number: 1091 and the loss : 181533.390625\nEpoch number: 1101 and the loss : 181282.25\nEpoch number: 1111 and the loss : 180754.3125\nEpoch number: 1121 and the loss : 180462.03125\nEpoch number: 1131 and the loss : 180400.171875\nEpoch number: 1141 and the loss : 179942.25\nEpoch number: 1151 and the loss : 179673.75\nEpoch number: 1161 and the loss : 179348.0\nEpoch number: 1171 and the loss : 178919.28125\nEpoch number: 1181 and the loss : 178690.625\nEpoch number: 1191 and the loss : 178073.234375\nEpoch number: 1201 and the loss : 178103.265625\nEpoch number: 1211 and the loss : 177463.140625\nEpoch number: 1221 and the loss : 177485.5625\nEpoch number: 1231 and the loss : 176606.796875\nEpoch number: 1241 and the loss : 176364.296875\nEpoch number: 1251 and the loss : 175692.078125\nEpoch number: 1261 and the loss : 175565.734375\nEpoch number: 1271 and the loss : 175232.734375\nEpoch number: 1281 and the loss : 174929.71875\nEpoch number: 1291 and the loss : 174455.5625\nEpoch number: 1301 and the loss : 174202.3125\nEpoch number: 1311 and the loss : 173644.40625\nEpoch number: 1321 and the loss : 173141.671875\nEpoch number: 1331 and the loss : 173395.21875\nEpoch number: 1341 and the loss : 172593.75\nEpoch number: 1351 and the loss : 172279.5\nEpoch number: 1361 and the loss : 171989.015625\nEpoch number: 1371 and the loss : 171806.65625\nEpoch number: 1381 and the loss : 171285.046875\nEpoch number: 1391 and the loss : 170337.71875\nEpoch number: 1401 and the loss : 170518.375\nEpoch number: 1411 and the loss : 169670.625\nEpoch number: 1421 and the loss : 169798.140625\nEpoch number: 1431 and the loss : 169452.8125\nEpoch number: 1441 and the loss : 168694.796875\nEpoch number: 1451 and the loss : 168603.875\nEpoch number: 1461 and the loss : 167958.34375\nEpoch number: 1471 and the loss : 167630.375\nEpoch number: 1481 and the loss : 167345.15625\nEpoch number: 1491 and the loss : 166558.40625\nEpoch number: 1501 
and the loss : 166342.390625\nEpoch number: 1511 and the loss : 165815.125\nEpoch number: 1521 and the loss : 165865.515625\nEpoch number: 1531 and the loss : 165307.21875\nEpoch number: 1541 and the loss : 164545.765625\nEpoch number: 1551 and the loss : 164492.359375\nEpoch number: 1561 and the loss : 163942.09375\nEpoch number: 1571 and the loss : 163439.984375\nEpoch number: 1581 and the loss : 163051.375\nEpoch number: 1591 and the loss : 162924.21875\nEpoch number: 1601 and the loss : 162439.6875\nEpoch number: 1611 and the loss : 161664.234375\nEpoch number: 1621 and the loss : 160749.28125\nEpoch number: 1631 and the loss : 160880.375\nEpoch number: 1641 and the loss : 160639.96875\nEpoch number: 1651 and the loss : 160170.046875\nEpoch number: 1661 and the loss : 160171.953125\nEpoch number: 1671 and the loss : 159037.875\nEpoch number: 1681 and the loss : 158624.546875\nEpoch number: 1691 and the loss : 158572.421875\nEpoch number: 1701 and the loss : 157608.078125\nEpoch number: 1711 and the loss : 157305.578125\nEpoch number: 1721 and the loss : 157531.9375\nEpoch number: 1731 and the loss : 156656.25\nEpoch number: 1741 and the loss : 156780.109375\nEpoch number: 1751 and the loss : 156207.984375\nEpoch number: 1761 and the loss : 155242.390625\n"
],
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nplt.plot(range(epochs),final_losses)\nplt.ylabel(\"RMSE loss\")\nplt.xlabel('epochs');",
"_____no_output_____"
],
[
"## validate the test Data\ny_pred=\"\"\nwith torch.no_grad():\n y_pred=model(test_categorical,test_cont)\n loss=torch.sqrt(loss_function(y_pred,y_test))\nprint('RMSE: {}'.format(loss))",
"RMSE: 44610.72265625\n"
],
[
"data_verify=pd.DataFrame(y_test.tolist(),columns=[\"Test\"])",
"_____no_output_____"
],
[
"data_predicted=pd.DataFrame(y_pred.tolist(),columns=[\"Prediction\"])",
"_____no_output_____"
],
[
"data_predicted",
"_____no_output_____"
],
[
"final_output=pd.concat([data_verify,data_predicted],axis=1)\nfinal_output[\"Difference\"]=final_output[\"Test\"]-final_output['Prediction']\nfinal_output.head()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d3e498887a521cb910ccbe5910e36caadd4d85 | 39,009 | ipynb | Jupyter Notebook | 02_Working_With_MDF_Class.ipynb | SimulinkDevOps/Jupyter_MDF_Analysis | 3758309ee687b82eccbed2a0a0dc577e78cc5ded | [
"MIT"
] | null | null | null | 02_Working_With_MDF_Class.ipynb | SimulinkDevOps/Jupyter_MDF_Analysis | 3758309ee687b82eccbed2a0a0dc577e78cc5ded | [
"MIT"
] | null | null | null | 02_Working_With_MDF_Class.ipynb | SimulinkDevOps/Jupyter_MDF_Analysis | 3758309ee687b82eccbed2a0a0dc577e78cc5ded | [
"MIT"
] | 1 | 2020-03-25T06:10:50.000Z | 2020-03-25T06:10:50.000Z | 142.89011 | 16,736 | 0.882002 | [
[
[
"# Working with ````asammdf.MDF````",
"_____no_output_____"
]
],
[
[
"from asammdf import MDF, Signal\nprint(MDF.__doc__)",
"Unified access to MDF v3 and v4 files. Underlying _mdf's attributes and\n methods are linked to the `MDF` object via *setattr*. This is done to expose\n them to the user code and for performance considerations.\n\n Parameters\n ----------\n name : string | fsspec.core.OpenFile,\n mdf file name (if provided it must be a real file name) or\n fsspec.core.OpenFile object\n\n version : string\n mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10', '3.20',\n '3.30', '4.00', '4.10', '4.11', '4.20'); default '4.10'\n\n\n callback (\\*\\*kwargs) : function\n keyword only argument: function to call to update the progress; the\n function must accept two arguments (the current progress and maximum\n progress value)\n use_display_names (\\*\\*kwargs) : bool\n keyword only argument: for MDF4 files parse the XML channel comment to\n search for the display name; XML parsing is quite expensive so setting\n this to *False* can decrease the loading times very much; default\n *False*\n remove_source_from_channel_names (\\*\\*kwargs) : bool\n remove source from channel names (\"Speed\\XCP3\" -> \"Speed\")\n copy_on_get (\\*\\*kwargs) : bool\n copy arrays in the get method; default *True*\n\n \n"
]
],
[
[
"## Create 3 Signal objects",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"timestamps = np.array([0.1, 0.2, 0.3, 0.4, 0.5], dtype=np.float32)",
"_____no_output_____"
]
],
[
[
"### unit8",
"_____no_output_____"
]
],
[
[
"s_uint8 = Signal(samples=np.array([0, 1, 2, 3, 4], dtype=np.uint8),\n timestamps=timestamps,\n name='Uint8_Signal',\n unit='u1')",
"_____no_output_____"
]
],
[
[
"### int32",
"_____no_output_____"
]
],
[
[
"s_int32 = Signal(samples=np.array([-20, -10, 0, 10, 20], dtype=np.int32),\n timestamps=timestamps,\n name='Int32_Signal',\n unit='i4')",
"_____no_output_____"
]
],
[
[
"### float64",
"_____no_output_____"
]
],
[
[
"s_float64 = Signal(samples=np.array([-20, -10, 0, 10, 20], dtype=np.float64),\n timestamps=timestamps,\n name='Float64_Signal',\n unit='f8')",
"_____no_output_____"
]
],
[
[
"Create empty MDF version 4.00 file:",
"_____no_output_____"
]
],
[
[
"with MDF(version=\"4.10\") as mdf4:\n\n # append the 3 signals to the new file\n signals = [s_uint8, s_int32, s_float64]\n mdf4.append(signals, \"Created by Python\")\n\n # save new file\n mdf4.save(\"my_new_file.mf4\", overwrite=True)\n\n # convert new file to mdf version 3.10\n mdf3 = mdf4.convert(version=\"3.10\")\n print(mdf3.version)\n\n # get the float signal\n sig = mdf3.get(\"Float64_Signal\")\n print(sig)\n\n # cut measurement from 0.3s to end of measurement\n mdf4_cut = mdf4.cut(start=0.3)\n mdf4_cut.get(\"Float64_Signal\").plot()\n\n # cut measurement from start of measurement to 0.4s\n mdf4_cut = mdf4.cut(stop=0.45)\n mdf4_cut.get(\"Float64_Signal\").plot()\n\n # filter some signals from the file\n mdf4 = mdf4.filter([\"Int32_Signal\", \"Uint8_Signal\"])\n\n # save using zipped transpose deflate blocks\n mdf4.save(\"out.mf4\", compression=2, overwrite=True)\n",
"WARNING:root:Signal plotting requires pyqtgraph or matplotlib\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3ec4ecb59f8f45474b3080c45043fa244284a | 6,473 | ipynb | Jupyter Notebook | notebooks/03_categorical_pipeline_sol_01.ipynb | odotreppe/scikit-learn-mooc | da97773fc9b860371e94e3c72791b0c92471b22d | [
"CC-BY-4.0"
] | 634 | 2020-03-10T15:42:46.000Z | 2022-03-28T15:19:00.000Z | notebooks/03_categorical_pipeline_sol_01.ipynb | odotreppe/scikit-learn-mooc | da97773fc9b860371e94e3c72791b0c92471b22d | [
"CC-BY-4.0"
] | 467 | 2020-03-10T15:42:31.000Z | 2022-03-31T09:10:04.000Z | notebooks/03_categorical_pipeline_sol_01.ipynb | odotreppe/scikit-learn-mooc | da97773fc9b860371e94e3c72791b0c92471b22d | [
"CC-BY-4.0"
] | 314 | 2020-03-11T14:28:26.000Z | 2022-03-31T12:01:02.000Z | 32.527638 | 130 | 0.630774 | [
[
[
"# 📃 Solution for Exercise M1.04\n\nThe goal of this exercise is to evaluate the impact of using an arbitrary\ninteger encoding for categorical variables along with a linear\nclassification model such as Logistic Regression.\n\nTo do so, let's try to use `OrdinalEncoder` to preprocess the categorical\nvariables. This preprocessor is assembled in a pipeline with\n`LogisticRegression`. The generalization performance of the pipeline can be\nevaluated by cross-validation and then compared to the score obtained when\nusing `OneHotEncoder` or to some other baseline score.\n\nFirst, we load the dataset.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nadult_census = pd.read_csv(\"../datasets/adult-census.csv\")",
"_____no_output_____"
],
[
"target_name = \"class\"\ntarget = adult_census[target_name]\ndata = adult_census.drop(columns=[target_name, \"education-num\"])",
"_____no_output_____"
]
],
[
[
"In the previous notebook, we used `sklearn.compose.make_column_selector` to\nautomatically select columns with a specific data type (also called `dtype`).\nHere, we will use this selector to get only the columns containing strings\n(column with `object` dtype) that correspond to categorical features in our\ndataset.",
"_____no_output_____"
]
],
[
[
"from sklearn.compose import make_column_selector as selector\n\ncategorical_columns_selector = selector(dtype_include=object)\ncategorical_columns = categorical_columns_selector(data)\ndata_categorical = data[categorical_columns]",
"_____no_output_____"
]
],
[
[
"We filter our dataset that it contains only categorical features.\nDefine a scikit-learn pipeline com\n\nBecause `OrdinalEncoder` can raise errors if it sees an unknown category at\nprediction time, you can set the `handle_unknown=\"use_encoded_value\"` and\n`unknown_value` parameters. You can refer to the\n[scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)\nfor more details regarding these parameters.",
"_____no_output_____"
]
],
[
[
"\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.linear_model import LogisticRegression\n\nmodel = make_pipeline(\n OrdinalEncoder(handle_unknown=\"use_encoded_value\", unknown_value=-1),\n LogisticRegression(max_iter=500))",
"_____no_output_____"
]
],
[
[
"Your model is now defined. Evaluate it using a cross-validation using\n`sklearn.model_selection.cross_validate`.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_validate\n\ncv_results = cross_validate(model, data_categorical, target)\n\nscores = cv_results[\"test_score\"]\nprint(\"The mean cross-validation accuracy is: \"\n f\"{scores.mean():.3f} +/- {scores.std():.3f}\")",
"_____no_output_____"
]
],
[
[
"Using an arbitrary mapping from string labels to integers as done here causes\nthe linear model to make bad assumptions on the relative ordering of\ncategories.\n\nThis prevents the model from learning anything predictive enough and the\ncross-validated score is even lower than the baseline we obtained by ignoring\nthe input data and just constantly predicting the most frequent class:",
"_____no_output_____"
]
],
[
[
"from sklearn.dummy import DummyClassifier\n\ncv_results = cross_validate(DummyClassifier(strategy=\"most_frequent\"),\n data_categorical, target)\nscores = cv_results[\"test_score\"]\nprint(\"The mean cross-validation accuracy is: \"\n f\"{scores.mean():.3f} +/- {scores.std():.3f}\")",
"_____no_output_____"
]
],
[
[
"Now, we would like to compare the generalization performance of our previous\nmodel with a new model where instead of using an `OrdinalEncoder`, we will\nuse a `OneHotEncoder`. Repeat the model evaluation using cross-validation.\nCompare the score of both models and conclude on the impact of choosing a\nspecific encoding strategy when using a linear model.",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import OneHotEncoder\n\nmodel = make_pipeline(\n OneHotEncoder(handle_unknown=\"ignore\"),\n LogisticRegression(max_iter=500))\ncv_results = cross_validate(model, data_categorical, target)\nscores = cv_results[\"test_score\"]\nprint(\"The mean cross-validation accuracy is: \"\n f\"{scores.mean():.3f} +/- {scores.std():.3f}\")",
"_____no_output_____"
]
],
[
[
"With the linear classifier chosen, using an encoding that does not assume\nany ordering lead to much better result.\n\nThe important message here is: linear model and `OrdinalEncoder` are used\ntogether only for ordinal categorical features, features with a specific\nordering. Otherwise, your model will perform poorly.",
"_____no_output_____"
]
]
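When a categorical feature really is ordinal, the ordering can be passed explicitly to `OrdinalEncoder` instead of letting it infer an alphabetical one. A minimal sketch, where the category list below is a hypothetical, incomplete ordering used only for illustration:

```python
from sklearn.preprocessing import OrdinalEncoder

# hypothetical, incomplete ordering of the 'education' categories,
# used only for illustration (the dataset values keep a leading space)
education_order = [' Preschool', ' HS-grad', ' Some-college',
                   ' Bachelors', ' Masters', ' Doctorate']

encoder = OrdinalEncoder(categories=[education_order],
                         handle_unknown="use_encoded_value",
                         unknown_value=-1)
encoded_education = encoder.fit_transform(data[["education"]])
```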
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d3ed0d32254b620e1cf04b030af50c33a73ba3 | 42,433 | ipynb | Jupyter Notebook | model_tflite/TFLite_FashionMnist.ipynb | alessiot/Flask-Tutorial-TFLite | dbbb7fa7d6617fec2335c5d9428534b290cd0fab | [
"MIT"
] | null | null | null | model_tflite/TFLite_FashionMnist.ipynb | alessiot/Flask-Tutorial-TFLite | dbbb7fa7d6617fec2335c5d9428534b290cd0fab | [
"MIT"
] | null | null | null | model_tflite/TFLite_FashionMnist.ipynb | alessiot/Flask-Tutorial-TFLite | dbbb7fa7d6617fec2335c5d9428534b290cd0fab | [
"MIT"
] | null | null | null | 43.079188 | 8,012 | 0.677185 | [
[
[
[
"##### Copyright 2018 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Train Your Own Model and Convert It to TFLite",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%202%20-%20TensorFlow%20Lite/Week%201/Exercises/TFLite_Week1_Exercise.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%202%20-%20TensorFlow%20Lite/Week%201/Exercises/TFLite_Week1_Exercise.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"This notebook uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:\n\n<table>\n <tr><td>\n <img src=\"https://tensorflow.org/images/fashion-mnist-sprite.png\"\n alt=\"Fashion MNIST sprite\" width=\"600\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 1.</b> <a href=\"https://github.com/zalandoresearch/fashion-mnist\">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/> \n </td></tr>\n</table>\n\nFashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the \"Hello, World\" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc.) in a format identical to that of the articles of clothing we'll use here.\n\nThis uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.\n\nWe will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow. Import and load the Fashion MNIST data directly from TensorFlow:",
"_____no_output_____"
],
[
"# Setup",
"_____no_output_____"
]
],
[
[
"try:\n %tensorflow_version 2.x\nexcept:\n pass",
"_____no_output_____"
],
[
"import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\n\n\nprint('\\u2022 Using TensorFlow Version:', tf.__version__)\nprint('\\u2022 GPU Device Found.' if tf.test.is_gpu_available() else '\\u2022 GPU Device Not Found. Running on CPU')",
"• Using TensorFlow Version: 2.4.1\nWARNING:tensorflow:From <ipython-input-3-adb891da5723>:11: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.config.list_physical_devices('GPU')` instead.\n• GPU Device Not Found. Running on CPU\n"
]
],
[
[
"# Download Fashion MNIST Dataset\n\nWe will use TensorFlow Datasets to load the Fashion MNIST dataset. ",
"_____no_output_____"
]
],
[
[
"whole_ds,info_ds = tfds.load(\"fashion_mnist\", with_info = True, split='train+test', as_supervised=True) #60,000+10,000\n\nn = tf.data.experimental.cardinality(whole_ds).numpy() # 70,000\ntrain_num = int(n*0.8) #56,000\nval_num = int(n*0.1) #7000\n\ntrain_examples = whole_ds.take(train_num)\nvalidation_examples = whole_ds.skip(train_num).take(val_num)\ntest_examples = whole_ds.skip(train_num+val_num) #7000\n\nnum_examples = train_num\nnum_classes = info_ds.features['label'].num_classes",
"\u001b[1mDownloading and preparing dataset 29.45 MiB (download: 29.45 MiB, generated: 36.42 MiB, total: 65.87 MiB) to /Users/atambu/tensorflow_datasets/fashion_mnist/3.0.1...\u001b[0m\n\u001b[1mDataset fashion_mnist downloaded and prepared to /Users/atambu/tensorflow_datasets/fashion_mnist/3.0.1. Subsequent calls will reuse this data.\u001b[0m\n"
]
],
[
[
"The class names are not included with the dataset, so we will specify them here.",
"_____no_output_____"
]
],
[
[
"class_names = ['T-shirt_top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']",
"_____no_output_____"
],
[
"# Create a labels.txt file with the class names\nwith open('labels.txt', 'w') as f:\n f.write('\\n'.join(class_names))",
"_____no_output_____"
],
[
"# The images in the dataset are 28 by 28 pixels.\nIMG_SIZE = 28",
"_____no_output_____"
]
],
[
[
"# Preprocessing data",
"_____no_output_____"
],
[
"## Preprocess",
"_____no_output_____"
]
],
[
[
"def format_example(image, label):\n # Cast image to float32\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n \n # Normalize the image in the range [0, 1]\n image = image/255.0\n \n return image, tf.one_hot(label, num_classes)",
"_____no_output_____"
],
[
"# Specify the batch size\nBATCH_SIZE = 256",
"_____no_output_____"
]
],
[
[
"## Create Datasets From Images and Labels",
"_____no_output_____"
]
],
[
[
"# Create Datasets\ntrain_batches = train_examples.cache().shuffle(num_examples//4).batch(BATCH_SIZE).map(format_example).prefetch(1)\nvalidation_batches = validation_examples.cache().batch(BATCH_SIZE).map(format_example)\ntest_batches = test_examples.batch(1).map(format_example)",
"_____no_output_____"
]
],
[
[
"# Building the Model",
"_____no_output_____"
],
[
"```\nModel: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 16) 160 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 16) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 32) 4640 \n_________________________________________________________________\nflatten (Flatten) (None, 3872) 0 \n_________________________________________________________________\ndense (Dense) (None, 64) 247872 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 253,322\nTrainable params: 253,322\nNon-trainable params: 0\n```",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential([\n # Set the input shape to (28, 28, 1), kernel size=3, filters=16 and use ReLU activation,\n tf.keras.layers.Conv2D(input_shape=(28,28,1), kernel_size=3, filters=16, activation='relu'),\n \n tf.keras.layers.MaxPooling2D(),\n \n # Set the number of filters to 32, kernel size to 3 and use ReLU activation \n tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),\n \n # Flatten the output layer to 1 dimension\n tf.keras.layers.Flatten(),\n \n # Add a fully connected layer with 64 hidden units and ReLU activation\n tf.keras.layers.Dense(units=64, activation='relu'),\n \n # Attach a final softmax classification head\n tf.keras.layers.Dense(activation='softmax', units=num_classes)])\n\n# Set the appropriate loss function and use accuracy as your metric\nmodel.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics='accuracy')\n\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 16) 160 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 16) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 32) 4640 \n_________________________________________________________________\nflatten (Flatten) (None, 3872) 0 \n_________________________________________________________________\ndense (Dense) (None, 64) 247872 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 253,322\nTrainable params: 253,322\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## Train",
"_____no_output_____"
]
],
[
[
"model.fit(train_batches, \n epochs=10,\n validation_data=validation_batches)",
"Epoch 1/10\n219/219 [==============================] - 10s 43ms/step - loss: 1.8871 - accuracy: 0.3617 - val_loss: 0.7890 - val_accuracy: 0.7144\nEpoch 2/10\n219/219 [==============================] - 8s 37ms/step - loss: 0.7622 - accuracy: 0.7165 - val_loss: 0.6610 - val_accuracy: 0.7543\nEpoch 3/10\n219/219 [==============================] - 8s 37ms/step - loss: 0.6648 - accuracy: 0.7514 - val_loss: 0.5999 - val_accuracy: 0.7757\nEpoch 4/10\n219/219 [==============================] - 8s 38ms/step - loss: 0.6079 - accuracy: 0.7698 - val_loss: 0.5624 - val_accuracy: 0.7874\nEpoch 5/10\n219/219 [==============================] - 8s 36ms/step - loss: 0.5757 - accuracy: 0.7822 - val_loss: 0.5387 - val_accuracy: 0.8013\nEpoch 6/10\n219/219 [==============================] - 8s 36ms/step - loss: 0.5566 - accuracy: 0.7939 - val_loss: 0.5084 - val_accuracy: 0.8141\nEpoch 7/10\n219/219 [==============================] - 8s 36ms/step - loss: 0.5353 - accuracy: 0.8020 - val_loss: 0.4899 - val_accuracy: 0.8243\nEpoch 8/10\n219/219 [==============================] - 8s 34ms/step - loss: 0.5182 - accuracy: 0.8121 - val_loss: 0.4874 - val_accuracy: 0.8209\nEpoch 9/10\n219/219 [==============================] - 8s 35ms/step - loss: 0.4962 - accuracy: 0.8211 - val_loss: 0.4713 - val_accuracy: 0.8297\nEpoch 10/10\n219/219 [==============================] - 8s 35ms/step - loss: 0.4769 - accuracy: 0.8273 - val_loss: 0.4523 - val_accuracy: 0.8371\n"
]
],
[
[
"# Exporting to TFLite\n\nYou will now save the model to TFLite. We should note, that you will probably see some warning messages when running the code below. These warnings have to do with software updates and should not cause any errors or prevent your code from running. ",
"_____no_output_____"
]
],
[
[
"# EXERCISE: Use the tf.saved_model API to save your model in the SavedModel format. \nexport_dir = 'saved_model/1'\n\ntf.saved_model.save(model, export_dir)",
"INFO:tensorflow:Assets written to: saved_model/1/assets\n"
],
[
"#@title Select mode of optimization\nmode = \"Speed\" #@param [\"Default\", \"Storage\", \"Speed\"]\n\nif mode == 'Storage':\n optimization = tf.lite.Optimize.OPTIMIZE_FOR_SIZE\nelif mode == 'Speed':\n optimization = tf.lite.Optimize.OPTIMIZE_FOR_LATENCY\nelse:\n optimization = tf.lite.Optimize.DEFAULT",
"_____no_output_____"
],
[
"# EXERCISE: Use the TFLiteConverter SavedModel API to initialize the converter\n\nconverter = tf.lite.TFLiteConverter.from_saved_model(export_dir)\n\n# Set the optimzations\nconverter.optimizations = [optimization]\n\n# Invoke the converter to finally generate the TFLite model\ntflite_model = converter.convert()",
"_____no_output_____"
],
[
"tflite_model_file = pathlib.Path('./model.tflite')\ntflite_model_file.write_bytes(tflite_model)",
"_____no_output_____"
]
],
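If the target device needs integer arithmetic, a representative dataset can be supplied for full post-training integer quantization; a sketch reusing the `train_batches` pipeline defined earlier:

```python
def representative_data_gen():
    # yield a small number of single-image calibration batches
    for images, _ in train_batches.unbatch().batch(1).take(100):
        yield [images]

converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# restrict the converter to integer kernels
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]

quantized_tflite_model = converter.convert()
```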
[
[
"# Test the Model with TFLite Interpreter ",
"_____no_output_____"
]
],
[
[
"# Load TFLite model and allocate tensors.\ninterpreter = tf.lite.Interpreter(model_content=tflite_model)\ninterpreter.allocate_tensors()\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]",
"_____no_output_____"
],
[
"# Gather results for the randomly sampled test images\npredictions = []\ntest_labels = []\ntest_images = []\n\nfor img, label in test_batches.take(50):\n interpreter.set_tensor(input_index, img)\n interpreter.invoke()\n predictions.append(interpreter.get_tensor(output_index))\n test_labels.append(label[0])\n test_images.append(np.array(img))",
"_____no_output_____"
],
[
"#@title Utility functions for plotting\n# Utilities for plotting\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n \n img = np.squeeze(img)\n \n plt.imshow(img, cmap=plt.cm.binary)\n \n predicted_label = np.argmax(predictions_array)\n print(predicted_label, np.argmax(true_label.numpy()))\n\n if predicted_label == np.argmax(true_label.numpy()):\n color = 'green'\n else:\n color = 'red'\n \n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]), color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks(list(range(10)), class_names, rotation='vertical')\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array[0], color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array[0])\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('green')",
"_____no_output_____"
],
[
"#@title Visualize the outputs { run: \"auto\" }\nindex = 33 #@param {type:\"slider\", min:1, max:50, step:1}\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(index, predictions, tf.argmax(test_labels, axis=1), test_images)\nplt.show()\nplot_value_array(index, predictions, tf.argmax(test_labels, axis=1))\nplt.show()",
"0 0\n"
]
],
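The same interpreter calls can be extended into a quick accuracy check over the whole test split; a sketch reusing `test_batches` and the input/output indices obtained above:

```python
correct = 0
total = 0

for img, label in test_batches:
    interpreter.set_tensor(input_index, img)
    interpreter.invoke()
    prediction = interpreter.get_tensor(output_index)
    # labels were one-hot encoded by format_example, so compare argmaxes
    if np.argmax(prediction) == np.argmax(label.numpy()):
        correct += 1
    total += 1

print('TFLite accuracy on test images: %f' % (100 * correct / total))
```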
[
[
"# Download the TFLite Model and Assets\n\nIf you are running this notebook in a Colab, you can run the cell below to download the tflite model and labels to your local disk.\n\n**Note**: If the files do not download when you run the cell, try running the cell a second time. Your browser might prompt you to allow multiple files to be downloaded. ",
"_____no_output_____"
]
],
[
[
"try:\n from google.colab import files\n \n files.download(tflite_model_file)\n files.download('labels.txt')\nexcept:\n pass",
"_____no_output_____"
]
],
[
[
"# Prepare the Test Images for Download (Optional)",
"_____no_output_____"
]
],
[
[
"!mkdir -p test_images",
"_____no_output_____"
],
[
"tf.argmax(label, 1).numpy()[0]",
"_____no_output_____"
],
[
"def format_example(image, label):\n \n return image, tf.one_hot(label, num_classes)\n\ntest_batches = test_examples.batch(1).map(format_example)",
"_____no_output_____"
],
[
"from PIL import Image\n\nfor index, (image, label) in enumerate(test_batches.take(50)):\n #print(image)\n #image = tf.cast(image * 255.0, tf.uint8)\n image = tf.squeeze(image).numpy()\n pil_image = Image.fromarray(image)\n #print(image)\n pil_image.save('test_images/{}_{}.jpg'.format(class_names[tf.argmax(label, 1).numpy()[0]].lower(), index))",
"_____no_output_____"
],
[
"!ls test_images",
"ankle boot_0.jpg coat_35.jpg pullover_14.jpg t-shirt_top_28.jpg\r\nankle boot_15.jpg coat_37.jpg pullover_16.jpg t-shirt_top_29.jpg\r\nankle boot_24.jpg coat_4.jpg pullover_26.jpg t-shirt_top_34.jpg\r\nankle boot_27.jpg coat_49.jpg pullover_30.jpg t-shirt_top_39.jpg\r\nankle boot_45.jpg coat_7.jpg pullover_32.jpg t-shirt_top_41.jpg\r\nbag_19.jpg dress_1.jpg sandal_9.jpg t-shirt_top_44.jpg\r\nbag_2.jpg dress_11.jpg shirt_17.jpg t-shirt_top_5.jpg\r\nbag_31.jpg dress_25.jpg shirt_22.jpg t-shirt_top_8.jpg\r\nbag_38.jpg dress_3.jpg shirt_33.jpg trouser_21.jpg\r\nbag_43.jpg dress_40.jpg shirt_46.jpg trouser_36.jpg\r\nbag_48.jpg dress_47.jpg sneaker_18.jpg trouser_42.jpg\r\ncoat_13.jpg pullover_10.jpg sneaker_20.jpg\r\ncoat_23.jpg pullover_12.jpg sneaker_6.jpg\r\n"
],
[
"!zip -qq fmnist_test_images.zip -r test_images/",
"_____no_output_____"
]
],
[
[
"If you are running this notebook in a Colab, you can run the cell below to download the Zip file with the images to your local disk. \n\n**Note**: If the Zip file does not download when you run the cell, try running the cell a second time.",
"_____no_output_____"
]
],
[
[
"try:\n files.download('fmnist_test_images.zip')\nexcept:\n pass",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3ed4a31831436376fca51a913406dea478356 | 175,100 | ipynb | Jupyter Notebook | Data Analysis/Clean and Analyze Employee Exit Surveys/Clean and Analyze Employee Exit Surveys.ipynb | linnforsman/data-science-portfolio | fd51d5b74cea7a598fe0e4e7555af48cecf4c980 | [
"MIT"
] | null | null | null | Data Analysis/Clean and Analyze Employee Exit Surveys/Clean and Analyze Employee Exit Surveys.ipynb | linnforsman/data-science-portfolio | fd51d5b74cea7a598fe0e4e7555af48cecf4c980 | [
"MIT"
] | null | null | null | Data Analysis/Clean and Analyze Employee Exit Surveys/Clean and Analyze Employee Exit Surveys.ipynb | linnforsman/data-science-portfolio | fd51d5b74cea7a598fe0e4e7555af48cecf4c980 | [
"MIT"
] | null | null | null | 52.788664 | 16,542 | 0.412027 | [
[
[
"# Clean and Analyze Employee Exit Surveys\nIn this project, we'll clean and analyze exit surveys from employees of the Department of Education, Training and Employment (DETE)}) and the Technical and Further Education (TAFE) body of the Queensland government in Australia. The TAFE exit survey can be found here and the survey for the DETE can be found here.\n\nWe'll pretend our stakeholders want us to combine the results for both surveys to answer the following question:\n\n- Are employees who only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been there longer?",
"_____no_output_____"
],
[
"# Introduction\nFirst, we'll read in the datasets and do some initial exploration.",
"_____no_output_____"
]
],
[
[
"#Read in the data\nimport pandas as pd\nimport numpy as np\ndete_survey = pd.read_csv('dete_survey.csv')\n\n#Quick exploration of the data\npd.options.display.max_columns = 150 # to avoid truncated output\ndete_survey.head()",
"_____no_output_____"
],
[
"dete_survey.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 822 entries, 0 to 821\nData columns (total 56 columns):\nID 822 non-null int64\nSeparationType 822 non-null object\nCease Date 822 non-null object\nDETE Start Date 822 non-null object\nRole Start Date 822 non-null object\nPosition 817 non-null object\nClassification 455 non-null object\nRegion 822 non-null object\nBusiness Unit 126 non-null object\nEmployment Status 817 non-null object\nCareer move to public sector 822 non-null bool\nCareer move to private sector 822 non-null bool\nInterpersonal conflicts 822 non-null bool\nJob dissatisfaction 822 non-null bool\nDissatisfaction with the department 822 non-null bool\nPhysical work environment 822 non-null bool\nLack of recognition 822 non-null bool\nLack of job security 822 non-null bool\nWork location 822 non-null bool\nEmployment conditions 822 non-null bool\nMaternity/family 822 non-null bool\nRelocation 822 non-null bool\nStudy/Travel 822 non-null bool\nIll Health 822 non-null bool\nTraumatic incident 822 non-null bool\nWork life balance 822 non-null bool\nWorkload 822 non-null bool\nNone of the above 822 non-null bool\nProfessional Development 808 non-null object\nOpportunities for promotion 735 non-null object\nStaff morale 816 non-null object\nWorkplace issue 788 non-null object\nPhysical environment 817 non-null object\nWorklife balance 815 non-null object\nStress and pressure support 810 non-null object\nPerformance of supervisor 813 non-null object\nPeer support 812 non-null object\nInitiative 813 non-null object\nSkills 811 non-null object\nCoach 767 non-null object\nCareer Aspirations 746 non-null object\nFeedback 792 non-null object\nFurther PD 768 non-null object\nCommunication 814 non-null object\nMy say 812 non-null object\nInformation 816 non-null object\nKept informed 813 non-null object\nWellness programs 766 non-null object\nHealth & Safety 793 non-null object\nGender 798 non-null object\nAge 811 non-null object\nAboriginal 16 non-null object\nTorres Strait 3 non-null object\nSouth Sea 7 non-null object\nDisability 23 non-null object\nNESB 32 non-null object\ndtypes: bool(18), int64(1), object(37)\nmemory usage: 258.6+ KB\n"
],
[
"#Read in the data\ntafe_survey = pd.read_csv(\"tafe_survey.csv\")\n\n#Quick exploration of the data\ntafe_survey.head()",
"_____no_output_____"
],
[
"tafe_survey.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 702 entries, 0 to 701\nData columns (total 72 columns):\nRecord ID 702 non-null float64\nInstitute 702 non-null object\nWorkArea 702 non-null object\nCESSATION YEAR 695 non-null float64\nReason for ceasing employment 701 non-null object\nContributing Factors. Career Move - Public Sector 437 non-null object\nContributing Factors. Career Move - Private Sector 437 non-null object\nContributing Factors. Career Move - Self-employment 437 non-null object\nContributing Factors. Ill Health 437 non-null object\nContributing Factors. Maternity/Family 437 non-null object\nContributing Factors. Dissatisfaction 437 non-null object\nContributing Factors. Job Dissatisfaction 437 non-null object\nContributing Factors. Interpersonal Conflict 437 non-null object\nContributing Factors. Study 437 non-null object\nContributing Factors. Travel 437 non-null object\nContributing Factors. Other 437 non-null object\nContributing Factors. NONE 437 non-null object\nMain Factor. Which of these was the main factor for leaving? 113 non-null object\nInstituteViews. Topic:1. I feel the senior leadership had a clear vision and direction 608 non-null object\nInstituteViews. Topic:2. I was given access to skills training to help me do my job better 613 non-null object\nInstituteViews. Topic:3. I was given adequate opportunities for personal development 610 non-null object\nInstituteViews. Topic:4. I was given adequate opportunities for promotion within %Institute]Q25LBL% 608 non-null object\nInstituteViews. Topic:5. I felt the salary for the job was right for the responsibilities I had 615 non-null object\nInstituteViews. Topic:6. The organisation recognised when staff did good work 607 non-null object\nInstituteViews. Topic:7. Management was generally supportive of me 614 non-null object\nInstituteViews. Topic:8. Management was generally supportive of my team 608 non-null object\nInstituteViews. Topic:9. I was kept informed of the changes in the organisation which would affect me 610 non-null object\nInstituteViews. Topic:10. Staff morale was positive within the Institute 602 non-null object\nInstituteViews. Topic:11. If I had a workplace issue it was dealt with quickly 601 non-null object\nInstituteViews. Topic:12. If I had a workplace issue it was dealt with efficiently 597 non-null object\nInstituteViews. Topic:13. If I had a workplace issue it was dealt with discreetly 601 non-null object\nWorkUnitViews. Topic:14. I was satisfied with the quality of the management and supervision within my work unit 609 non-null object\nWorkUnitViews. Topic:15. I worked well with my colleagues 605 non-null object\nWorkUnitViews. Topic:16. My job was challenging and interesting 607 non-null object\nWorkUnitViews. Topic:17. I was encouraged to use my initiative in the course of my work 610 non-null object\nWorkUnitViews. Topic:18. I had sufficient contact with other people in my job 613 non-null object\nWorkUnitViews. Topic:19. I was given adequate support and co-operation by my peers to enable me to do my job 609 non-null object\nWorkUnitViews. Topic:20. I was able to use the full range of my skills in my job 609 non-null object\nWorkUnitViews. Topic:21. I was able to use the full range of my abilities in my job. ; Category:Level of Agreement; Question:YOUR VIEWS ABOUT YOUR WORK UNIT] 608 non-null object\nWorkUnitViews. Topic:22. I was able to use the full range of my knowledge in my job 608 non-null object\nWorkUnitViews. Topic:23. 
My job provided sufficient variety 611 non-null object\nWorkUnitViews. Topic:24. I was able to cope with the level of stress and pressure in my job 610 non-null object\nWorkUnitViews. Topic:25. My job allowed me to balance the demands of work and family to my satisfaction 611 non-null object\nWorkUnitViews. Topic:26. My supervisor gave me adequate personal recognition and feedback on my performance 606 non-null object\nWorkUnitViews. Topic:27. My working environment was satisfactory e.g. sufficient space, good lighting, suitable seating and working area 610 non-null object\nWorkUnitViews. Topic:28. I was given the opportunity to mentor and coach others in order for me to pass on my skills and knowledge prior to my cessation date 609 non-null object\nWorkUnitViews. Topic:29. There was adequate communication between staff in my unit 603 non-null object\nWorkUnitViews. Topic:30. Staff morale was positive within my work unit 606 non-null object\nInduction. Did you undertake Workplace Induction? 619 non-null object\nInductionInfo. Topic:Did you undertake a Corporate Induction? 432 non-null object\nInductionInfo. Topic:Did you undertake a Institute Induction? 483 non-null object\nInductionInfo. Topic: Did you undertake Team Induction? 440 non-null object\nInductionInfo. Face to Face Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object\nInductionInfo. On-line Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object\nInductionInfo. Induction Manual Topic:Did you undertake a Corporate Induction? 555 non-null object\nInductionInfo. Face to Face Topic:Did you undertake a Institute Induction? 530 non-null object\nInductionInfo. On-line Topic:Did you undertake a Institute Induction? 555 non-null object\nInductionInfo. Induction Manual Topic:Did you undertake a Institute Induction? 553 non-null object\nInductionInfo. Face to Face Topic: Did you undertake Team Induction; Category? 555 non-null object\nInductionInfo. On-line Topic: Did you undertake Team Induction?process you undertook and how it was conducted.] 555 non-null object\nInductionInfo. Induction Manual Topic: Did you undertake Team Induction? 555 non-null object\nWorkplace. Topic:Did you and your Manager develop a Performance and Professional Development Plan (PPDP)? 608 non-null object\nWorkplace. Topic:Does your workplace promote a work culture free from all forms of unlawful discrimination? 594 non-null object\nWorkplace. Topic:Does your workplace promote and practice the principles of employment equity? 587 non-null object\nWorkplace. Topic:Does your workplace value the diversity of its employees? 586 non-null object\nWorkplace. Topic:Would you recommend the Institute as an employer to others? 581 non-null object\nGender. What is your Gender? 596 non-null object\nCurrentAge. Current Age 596 non-null object\nEmployment Type. Employment Type 596 non-null object\nClassification. Classification 596 non-null object\nLengthofServiceOverall. Overall Length of Service at Institute (in years) 596 non-null object\nLengthofServiceCurrent. Length of Service at current workplace (in years) 596 non-null object\ndtypes: float64(2), object(70)\nmemory usage: 395.0+ KB\n"
]
],
[
[
"We can make the following observations based on the work above:\n\n* The dete_survey dataframe contains 'Not Stated' values that indicate values are missing, but they aren't represented as NaN.\n* Both the dete_survey and tafe_survey contain many columns that we don't need to complete our analysis.\n* Each dataframe contains many of the same columns, but the column names are different.\n* There are multiple columns/answers that indicate an employee resigned because they were dissatisfied.",
"_____no_output_____"
],
[
"# Identify Missing Values and Drop Unneccessary Columns\nFirst, we'll correct the Not Stated values and drop some of the columns we don't need for our analysis.",
"_____no_output_____"
]
],
[
[
"# Read in the data again, but this time read 'Not Stated' values as 'NaN'\ndete_survey = pd.read_csv('dete_survey.csv', na_values='Not Stated')\n\n#Quick exploration of the data\ndete_survey.head()",
"_____no_output_____"
],
[
"# Remove columns we don't need for our analysis\ndete_survey_updated = dete_survey.drop(dete_survey.columns[28:49], axis=1)\ntafe_survey_updated = tafe_survey.drop(tafe_survey.columns[17:66], axis=1)\n\n#Check that the columns were dropped\nprint(dete_survey_updated.columns)\nprint(tafe_survey_updated.columns)",
"Index(['ID', 'SeparationType', 'Cease Date', 'DETE Start Date',\n 'Role Start Date', 'Position', 'Classification', 'Region',\n 'Business Unit', 'Employment Status', 'Career move to public sector',\n 'Career move to private sector', 'Interpersonal conflicts',\n 'Job dissatisfaction', 'Dissatisfaction with the department',\n 'Physical work environment', 'Lack of recognition',\n 'Lack of job security', 'Work location', 'Employment conditions',\n 'Maternity/family', 'Relocation', 'Study/Travel', 'Ill Health',\n 'Traumatic incident', 'Work life balance', 'Workload',\n 'None of the above', 'Gender', 'Age', 'Aboriginal', 'Torres Strait',\n 'South Sea', 'Disability', 'NESB'],\n dtype='object')\nIndex(['Record ID', 'Institute', 'WorkArea', 'CESSATION YEAR',\n 'Reason for ceasing employment',\n 'Contributing Factors. Career Move - Public Sector ',\n 'Contributing Factors. Career Move - Private Sector ',\n 'Contributing Factors. Career Move - Self-employment',\n 'Contributing Factors. Ill Health',\n 'Contributing Factors. Maternity/Family',\n 'Contributing Factors. Dissatisfaction',\n 'Contributing Factors. Job Dissatisfaction',\n 'Contributing Factors. Interpersonal Conflict',\n 'Contributing Factors. Study', 'Contributing Factors. Travel',\n 'Contributing Factors. Other', 'Contributing Factors. NONE',\n 'Gender. What is your Gender?', 'CurrentAge. Current Age',\n 'Employment Type. Employment Type', 'Classification. Classification',\n 'LengthofServiceOverall. Overall Length of Service at Institute (in years)',\n 'LengthofServiceCurrent. Length of Service at current workplace (in years)'],\n dtype='object')\n"
]
],
[
[
"# Rename Columns\nNext, we'll standardize the names of the columns we want to work with, because we eventually want to combine the dataframes.",
"_____no_output_____"
]
],
[
[
"# Clean the column names\ndete_survey_updated.columns = dete_survey_updated.columns.str.lower().str.strip().str.replace(' ', '_')\n\n# Check that the column names were updated correctly\ndete_survey_updated.columns",
"_____no_output_____"
],
[
"# Update column names to match the names in dete_survey_updated\nmapping = {'Record ID': 'id', 'CESSATION YEAR': 'cease_date', 'Reason for ceasing employment': 'separationtype', 'Gender. What is your Gender?': 'gender', 'CurrentAge. Current Age': 'age',\n 'Employment Type. Employment Type': 'employment_status',\n 'Classification. Classification': 'position',\n 'LengthofServiceOverall. Overall Length of Service at Institute (in years)': 'institute_service',\n 'LengthofServiceCurrent. Length of Service at current workplace (in years)': 'role_service'}\ntafe_survey_updated = tafe_survey_updated.rename(mapping, axis = 1)\n\n# Check that the specified column names were updated correctly\ntafe_survey_updated.columns",
"_____no_output_____"
]
],
[
[
"# Filter the Data\nFor this project, we'll only analyze survey respondents who resigned, so we'll only select separation types containing the string 'Resignation'.",
"_____no_output_____"
]
],
[
[
"# Check the unique values for the separationtype column\ntafe_survey_updated['separationtype'].value_counts()",
"_____no_output_____"
],
[
"# Check the unique values for the separationtype column\ndete_survey_updated['separationtype'].value_counts()",
"_____no_output_____"
],
[
"# Update all separation types containing the word \"resignation\" to 'Resignation'\ndete_survey_updated['separationtype'] = dete_survey_updated['separationtype'].str.split('-').str[0]\n\n# Check the values in the separationtype column were updated correctly\ndete_survey_updated['separationtype'].value_counts()",
"_____no_output_____"
],
[
"# Select only the resignation separation types from each dataframe\ndete_resignations = dete_survey_updated[dete_survey_updated['separationtype'] == 'Resignation'].copy()\ntafe_resignations = tafe_survey_updated[tafe_survey_updated['separationtype'] == 'Resignation'].copy()",
"_____no_output_____"
]
],
[
[
"# Verify the Data\nBelow, we clean and explore the cease_date and dete_start_date columns to make sure all of the years make sense. We'll use the following criteria:\n\n* Since the cease_date is the last year of the person's employment and the dete_start_date is the person's first year of employment, it wouldn't make sense to have years after the current date.\n* Given that most people in this field start working in their 20s, it's also unlikely that the dete_start_date was before the year 1940.",
"_____no_output_____"
]
],
[
[
"# Check the unique values\ndete_resignations['cease_date'].value_counts()",
"_____no_output_____"
],
[
"# Extract the years and convert them to a float type\ndete_resignations['cease_date'] = dete_resignations['cease_date'].str.split('/').str[-1]\ndete_resignations['cease_date'] = dete_resignations['cease_date'].astype(\"float\")\n\n# Check the values again and look for outliers\ndete_resignations['cease_date'].value_counts()",
"_____no_output_____"
],
[
"# Check the unique values and look for outliers\ndete_resignations['dete_start_date'].value_counts().sort_values()",
"_____no_output_____"
],
[
"# Check the unique values\ntafe_resignations['cease_date'].value_counts().sort_values()",
"_____no_output_____"
]
],
[
[
"Below are our findings:\n\n* The years in both dataframes don't completely align. The tafe_survey_updated dataframe contains some cease dates in 2009, but the dete_survey_updated dataframe does not. The tafe_survey_updated dataframe also contains many more cease dates in 2010 than the dete_survey_updaed dataframe. Since we aren't concerned with analyzing the results by year, we'll leave them as is.",
"_____no_output_____"
],
[
"# Create a New Column¶\nSince our end goal is to answer the question below, we need a column containing the length of time an employee spent in their workplace, or years of service, in both dataframes.\n\n* End goal: Are employees who have only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been at the job longer?\n\nThe tafe_resignations dataframe already contains a \"service\" column, which we renamed to institute_service.\n\nBelow, we calculate the years of service in the dete_survey_updated dataframe by subtracting the dete_start_date from the cease_date and create a new column named institute_service.",
"_____no_output_____"
]
],
[
[
"# Calculate the length of time an employee spent in their respective workplace and create a new column\ndete_resignations['institute_service'] = dete_resignations['cease_date'] - dete_resignations['dete_start_date']\n\n# Quick check of the result\ndete_resignations['institute_service'].head()",
"_____no_output_____"
]
],
[
[
"# Identify Dissatisfied Employees¶\nNext, we'll identify any employees who resigned because they were dissatisfied. Below are the columns we'll use to categorize employees as \"dissatisfied\" from each dataframe:\n\n1. tafe_survey_updated:\n* Contributing Factors. Dissatisfaction\n* Contributing Factors. Job Dissatisfaction\n2. dafe_survey_updated:\n* job_dissatisfaction\n* dissatisfaction_with_the_department\n* physical_work_environment\n* lack_of_recognition\n* lack_of_job_security\n* work_location\n* employment_conditions\n* work_life_balance\n* workload\n\nIf the employee indicated any of the factors above caused them to resign, we'll mark them as dissatisfied in a new column. After our changes, the new dissatisfied column will contain just the following values:\n\n* True: indicates a person resigned because they were dissatisfied in some way\n* False: indicates a person resigned because of a reason other than dissatisfaction with the job\n* NaN: indicates the value is missing",
"_____no_output_____"
]
],
[
[
"# Check the unique values\ntafe_resignations['Contributing Factors. Dissatisfaction'].value_counts()",
"_____no_output_____"
],
[
"# Check the unique values\ntafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts()",
"_____no_output_____"
],
[
"# Update the values in the contributing factors columns to be either True, False, or NaN\ndef update_vals(x):\n if x == '-':\n return False\n elif pd.isnull(x):\n return np.nan\n else:\n return True\ntafe_resignations['dissatisfied'] = tafe_resignations[['Contributing Factors. Dissatisfaction', 'Contributing Factors. Job Dissatisfaction']].applymap(update_vals).any(1, skipna=False)\ntafe_resignations_up = tafe_resignations.copy()\n\n# Check the unique values after the updates\ntafe_resignations_up['dissatisfied'].value_counts(dropna=False)",
"_____no_output_____"
],
[
"# Update the values in columns related to dissatisfaction to be either True, False, or NaN\ndete_resignations['dissatisfied'] = dete_resignations[['job_dissatisfaction',\n 'dissatisfaction_with_the_department', 'physical_work_environment',\n 'lack_of_recognition', 'lack_of_job_security', 'work_location',\n 'employment_conditions', 'work_life_balance',\n 'workload']].any(1, skipna=False)\ndete_resignations_up = dete_resignations.copy()\ndete_resignations_up['dissatisfied'].value_counts(dropna=False)",
"_____no_output_____"
]
],
[
[
"# Combining the Data¶\nBelow, we'll add an institute column so that we can differentiate the data from each survey after we combine them. Then, we'll combine the dataframes and drop any remaining columns we don't need.",
"_____no_output_____"
]
],
[
[
"# Add an institute column\ndete_resignations_up['institute'] = 'DETE'\ntafe_resignations_up['institute'] = 'TAFE'",
"_____no_output_____"
],
[
"# Combine the dataframes\ncombined = pd.concat([dete_resignations_up, tafe_resignations_up], ignore_index=True)\n\n# Verify the number of non null values in each column\ncombined.notnull().sum().sort_values()",
"_____no_output_____"
],
[
"# Drop columns with less than 500 non null values\ncombined_updated = combined.dropna(thresh = 500, axis =1).copy()",
"_____no_output_____"
]
],
[
[
"# Clean the Service Column¶\nNext, we'll clean the institute_service column and categorize employees according to the following definitions:\n\n* New: Less than 3 years in the workplace\n* Experienced: 3-6 years in the workplace\n* Established: 7-10 years in the workplace\n* Veteran: 11 or more years in the workplace\n\nOur analysis is based on this article, which makes the argument that understanding employee's needs according to career stage instead of age is more effective.",
"_____no_output_____"
]
],
[
[
"# Check the unique values\ncombined_updated['institute_service'].value_counts(dropna=False)",
"_____no_output_____"
],
[
"# Extract the years of service and convert the type to float\ncombined_updated['institute_service_up'] = combined_updated['institute_service'].astype('str').str.extract(r'(\\d+)')\ncombined_updated['institute_service_up'] = combined_updated['institute_service_up'].astype('float')\n\n# Check the years extracted are correct\ncombined_updated['institute_service_up'].value_counts()",
"/dataquest/system/env/python3/lib/python3.4/site-packages/ipykernel/__main__.py:2: FutureWarning: currently extract(expand=None) means expand=False (return Index/Series/DataFrame) but in a future version of pandas this will be changed to expand=True (return DataFrame)\n from ipykernel import kernelapp as app\n"
],
[
"# Convert years of service to categories\ndef transform_service(val):\n if val >= 11:\n return \"Veteran\"\n elif 7 <= val < 11:\n return \"Established\"\n elif 3 <= val < 7:\n return \"Experienced\"\n elif pd.isnull(val):\n return np.nan\n else:\n return \"New\"\ncombined_updated['service_cat'] = combined_updated['institute_service_up'].apply(transform_service)\n\n# Quick check of the update\ncombined_updated['service_cat'].value_counts()",
"_____no_output_____"
]
],
[
[
"# Perform Some Initial Analysis¶\nFinally, we'll replace the missing values in the dissatisfied column with the most frequent value, False. Then, we'll calculate the percentage of employees who resigned due to dissatisfaction in each service_cat group and plot the results.\n\nNote that since we still have additional missing values left to deal with, this is meant to be an initial introduction to the analysis, not the final analysis.",
"_____no_output_____"
]
],
[
[
"# Verify the unique values\ncombined_updated['dissatisfied'].value_counts(dropna=False)",
"_____no_output_____"
],
[
"# Replace missing values with the most frequent value, False\ncombined_updated['dissatisfied'] = combined_updated['dissatisfied'].fillna(False)",
"_____no_output_____"
],
[
"# Calculate the percentage of employees who resigned due to dissatisfaction in each category\ndis_pct = combined_updated.pivot_table(index='service_cat', values='dissatisfied')\n\n# Plot the results\n%matplotlib inline\ndis_pct.plot(kind='bar', rot=30)",
"_____no_output_____"
]
],
[
[
"From the initial analysis above, we can tentatively conclude that employees with 7 or more years of service are more likely to resign due to some kind of dissatisfaction with the job than employees with less than 7 years of service. However, we need to handle the rest of the missing data to finalize our analysis.",
"_____no_output_____"
],
[
"## Conclusions\n* Explored the data and figured out how to prepare it for analysis\n* Corrected some of the missing values\n* Dropped any data not needed for our analysis\n* Renamed our columns\n* Verified the quality of our data\n* Created a new institute_service column\n* Cleaned the Contributing Factors columns\n* Created a new column indicating if an employee resigned because they were dissatisfied in some way\n* Combined the data\n* Cleaned the institute_service column\n* Handled the missing values in the dissatisfied column\n* Aggregated the data\n",
"_____no_output_____"
]
]
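As a sanity check on the percentages above, the raw counts behind each bar can be tabulated from the same dataframe; a short sketch:

```python
# absolute counts of dissatisfied vs. not dissatisfied per career stage
counts = pd.crosstab(combined_updated['service_cat'],
                     combined_updated['dissatisfied'])
print(counts)

# resignations that still have no career stage assigned
print(combined_updated['service_cat'].isnull().sum())
```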
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
d0d3f32aab5a4d6391897bd1e5a410a9c082a7ad | 18,551 | ipynb | Jupyter Notebook | F_Camp/c11_Simple_CNN_hangul.ipynb | woosa7/pytorch | 9d424463db298904b266dd8edf78ded3536a3532 | [
"MIT"
] | null | null | null | F_Camp/c11_Simple_CNN_hangul.ipynb | woosa7/pytorch | 9d424463db298904b266dd8edf78ded3536a3532 | [
"MIT"
] | null | null | null | F_Camp/c11_Simple_CNN_hangul.ipynb | woosa7/pytorch | 9d424463db298904b266dd8edf78ded3536a3532 | [
"MIT"
] | null | null | null | 33.48556 | 4,342 | 0.574794 | [
[
[
"# Lec 11. Simple CNN : 한글 자모",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport torch.utils.data as Data\n\nimport torchvision.utils as utils\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\n\nimport numpy as np\nimport os",
"_____no_output_____"
],
[
"from PIL import Image\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Load Custom Data",
"_____no_output_____"
],
[
"* transforms에 대해서는 다음 참조\n\nhttps://pytorch.org/docs/stable/torchvision/transforms.html",
"_____no_output_____"
]
],
[
[
"img_dir = \"data/hangul/\"\nimg_data = dsets.ImageFolder(img_dir, \n transforms.Compose([\n transforms.Grayscale(),\n \n# # Data Augmentation\n# transforms.RandomRotation(15)\n# transforms.CenterCrop(28),\n# transforms.Lambda(lambda x: x.rotate(15)),\n \n# # Data Nomalization\n# transforms.Normalize(mean=(0.5,), std=(0.5,))\n\n transforms.ToTensor(),\n ]))\n\nprint(img_data.classes)\nprint(img_data.class_to_idx) # class 39 - 각 class별 720개 이미지 존재.",
"['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ', 'ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']\n{'ㄱ': 0, 'ㄲ': 1, 'ㄴ': 2, 'ㄷ': 3, 'ㄸ': 4, 'ㄹ': 5, 'ㅁ': 6, 'ㅂ': 7, 'ㅃ': 8, 'ㅅ': 9, 'ㅆ': 10, 'ㅇ': 11, 'ㅈ': 12, 'ㅉ': 13, 'ㅊ': 14, 'ㅋ': 15, 'ㅌ': 16, 'ㅍ': 17, 'ㅎ': 18, 'ㅏ': 19, 'ㅐ': 20, 'ㅑ': 21, 'ㅒ': 22, 'ㅓ': 23, 'ㅔ': 24, 'ㅕ': 25, 'ㅖ': 26, 'ㅗ': 27, 'ㅘ': 28, 'ㅙ': 29, 'ㅛ': 30, 'ㅜ': 31, 'ㅝ': 32, 'ㅞ': 33, 'ㅟ': 34, 'ㅠ': 35, 'ㅡ': 36, 'ㅢ': 37, 'ㅣ': 38}\n"
],
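The commented-out lines above hint at augmentation and normalization; a hedged sketch of what such a pipeline could look like (the rotation angle and the normalization statistics are illustrative guesses, not tuned values, and `Normalize` must come after `ToTensor` because it operates on tensors):

```python
augmented_transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.RandomRotation(15),                  # small random tilt
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,)),  # tensor-space normalization
])

# img_dir is the same hangul image folder used above
augmented_data = dsets.ImageFolder(img_dir, augmented_transform)
```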
[
"len(img_data) # = 39 * 720",
"_____no_output_____"
],
[
"img_data.imgs[0]",
"_____no_output_____"
],
[
"img = Image.open(\"data/hangul/ㅇ/111.png\").convert(\"L\") # 36 * 36 image\nimgarr = np.array(img)\nprint(imgarr.shape)\nplt.imshow(imgarr, cmap='gray')",
"(36, 36)\n"
],
[
"batch_size = 100\nfont_num = 720",
"_____no_output_____"
],
[
"from torch.utils.data import Sampler",
"_____no_output_____"
],
[
"def train_test_split(data, train_ratio, stratify, stratify_num, batch_size) :\n \n length = len(data)\n \n # 층화 추출\n if stratify :\n label_num = int(len(data)/stratify_num)\n cut = int(stratify_num*train_ratio)\n train_indices = np.random.permutation(np.arange(stratify_num))[:cut]\n test_indices = np.random.permutation(np.arange(stratify_num))[cut:]\n \n for i in range(1, label_num) :\n train_indices = np.concatenate((train_indices, np.random.permutation(np.arange(stratify_num))[:cut] + stratify_num*i))\n test_indices = np.concatenate((test_indices, np.random.permutation(np.arange(stratify_num))[cut:] + stratify_num*i))\n \n else :\n cut = int(len(data)*train_ratio)\n train_indices = np.random.permutation(np.arange(length))[:cut]\n test_indices = np.random.permutation(np.arange(length))[cut:]\n \n sampler = Data.SubsetRandomSampler(train_indices)\n \n train_loader = Data.DataLoader(data, batch_size=batch_size, shuffle=False, sampler=sampler, num_workers=0, drop_last=True)\n test_loader = Data.DataLoader(data, batch_size=batch_size, shuffle=False, sampler=sampler, num_workers=0, drop_last=True)\n\n return train_loader, test_loader, len(train_indices), len(test_indices)",
"_____no_output_____"
],
[
"train_loader, test_loader, train_num, test_num = train_test_split(img_data, 0.8, True, font_num, batch_size)",
"_____no_output_____"
],
[
"train_num, test_num",
"_____no_output_____"
]
],
[
[
"## Define Model",
"_____no_output_____"
]
],
[
[
"def c_conv(N, K, P=0, S=1):\n return int((N + 2*P - K) / S + 1)\n\ndef c_pool(N, K):\n return int(N/K)",
"_____no_output_____"
],
[
"c0 = 36\nc1 = c_conv(c0, 3)\nc2 = c_conv(c1, 3)\nc3 = c_pool(c2, 2)\n\nc4 = c_conv(c3, 3)\nc5 = c_conv(c4, 3)\nc6 = c_pool(c5, 2)\n\nprint(c1, c2, c3, c4, c5, c6)",
"34 32 16 14 12 6\n"
],
[
"class CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n\n # Test 1 - 84.78 %\n self.layer = nn.Sequential(\n nn.Conv2d(1,16,3), # 36 --> 34\n nn.BatchNorm2d(16),\n nn.ReLU(),\n\n nn.Conv2d(16,32,3), # 32\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2,2), # 16\n\n nn.Conv2d(32,64,3), # 14\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n nn.Conv2d(64,128,3), # 12\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d(2,2) # 6\n )\n\n self.fc_layer = nn.Sequential(\n nn.Linear(128*6*6,300),\n nn.ReLU(),\n nn.Linear(300,39)\n )\n \n # Weight Initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # init.xavier_normal(m.weight.data)\n init.kaiming_normal_(m.weight.data)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.Linear):\n init.kaiming_normal_(m.weight.data)\n m.bias.data.fill_(0)\n \n def forward(self,x):\n out = self.layer(x)\n out = out.view(batch_size, -1)\n out = self.fc_layer(out)\n\n return out\n \nmodel = CNN().cuda()",
"_____no_output_____"
],
[
"loss = nn.CrossEntropyLoss()\n\n# SGD\noptimizer = optim.SGD(model.parameters(), lr=0.1)\n\n# Adam\n# optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n# Momentum & Weight Regularization(L2)\n# optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=1e-5)",
"_____no_output_____"
],
[
"num_epochs = 10",
"_____no_output_____"
],
[
"# Learning Rate Scheduler\n# scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma= 0.99)\n# scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[10,30,80], gamma= 0.1)\n# scheduler = lr_scheduler.ExponentialLR(optimizer, gamma= 0.99)\n# scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')",
"_____no_output_____"
],
[
"total_batch = train_num//batch_size\n\nfor epoch in range(num_epochs):\n \n# scheduler.step()\n\n for i, (batch_images, batch_labels) in enumerate(train_loader):\n\n X = batch_images.cuda()\n Y = batch_labels.cuda()\n\n pred = model(X)\n cost = loss(pred, Y)\n\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n \n if (i+1) == total_batch:\n print('Epoch [%d/%d], lter [%d/%d] Loss: %.5f'%(epoch+1, num_epochs, i+1, total_batch, cost.item()))",
"Epoch [1/10], lter [224/224] Loss: 0.28873\nEpoch [2/10], lter [224/224] Loss: 0.13322\nEpoch [3/10], lter [224/224] Loss: 0.05497\nEpoch [4/10], lter [224/224] Loss: 0.02143\nEpoch [5/10], lter [224/224] Loss: 0.02459\nEpoch [6/10], lter [224/224] Loss: 0.03388\nEpoch [7/10], lter [224/224] Loss: 0.01384\nEpoch [8/10], lter [224/224] Loss: 0.01262\nEpoch [9/10], lter [224/224] Loss: 0.00434\nEpoch [10/10], lter [224/224] Loss: 0.01707\n"
],
[
"# torch.save(model.state_dict(), 'cnn_hangul_Adam.pkl')\n# print(\"Model Saved!\")",
"_____no_output_____"
]
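,
[
"# sketch: restoring the saved weights later (mirrors the commented save above)\n# model = CNN().cuda()\n# model.load_state_dict(torch.load('cnn_hangul_Adam.pkl'))\n# model.eval()",
"_____no_output_____"
]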
],
[
[
"## Test Model",
"_____no_output_____"
]
],
[
[
"model.eval()\n\ncorrect = 0\ntotal = 0\n\nfor images, labels in test_loader:\n \n images = images.cuda()\n outputs = model(images)\n \n# print(outputs.data) # 39 class에 대한 확률\n \n _, predicted = torch.max(outputs.data, 1)\n \n total += labels.size(0)\n correct += (predicted == labels.cuda()).sum()\n \n \ncorrect = correct.cpu().numpy() \nprint('correct :', correct)\nprint('total :', total)\nprint('Accuracy of test images: %f' % (100 * correct / total))",
"correct : 22398\ntotal : 22400\nAccuracy of test images: 99.991071\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3f459ecb27a4e8c8082aa1e3d9ec06c451cf5 | 650,427 | ipynb | Jupyter Notebook | Computer_Vision_Nanodegree_Program/01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | 1 | 2018-10-31T17:18:28.000Z | 2018-10-31T17:18:28.000Z | Computer_Vision_Nanodegree_Program/01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | null | null | null | Computer_Vision_Nanodegree_Program/01_Facial_Keypoint_Detection/3. Facial Keypoint Detection, Complete Pipeline.ipynb | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | null | null | null | 2,492.057471 | 323,020 | 0.961298 | [
[
[
"## Face and Facial Keypoint detection\n\nAfter you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.\n\n1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).\n2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was tp rescale, normalize, and turn any iimage into a Tensor to be accepted as input to your CNN.\n3. Use your trained model to detect facial keypoints on the image.\n\n---",
"_____no_output_____"
],
[
"In the next python cell we load in required libraries for this section of the project.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"#### Select an image \n\nSelect an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.",
"_____no_output_____"
]
],
[
[
"import cv2\n# load in color image for face detection\nimage = cv2.imread('images/obamas.jpg')\n\n# switch red and blue color channels \n# --> by default OpenCV assumes BLUE comes first, not RED as in many images\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# plot the image\nfig = plt.figure(figsize=(9,9))\nplt.imshow(image)",
"_____no_output_____"
]
],
[
[
"## Detect all faces in an image\n\nNext, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.\n\nIn the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.\n\nAn example of face detection on a variety of images is shown below.\n\n<img src='images/haar_cascade_ex.png' width=80% height=80%/>\n",
"_____no_output_____"
]
],
[
[
"# load in a haar cascade classifier for detecting frontal faces\nface_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')\n\n# run the detector\n# the output here is an array of detections; the corners of each detection box\n# if necessary, modify these parameters until you successfully identify every face in a given image\nfaces = face_cascade.detectMultiScale(image, 1.2, 2)\n\n# make a copy of the original image to plot detections on\nimage_with_detections = image.copy()\n\n# loop over the detected faces, mark the image where each face is found\nfor (x,y,w,h) in faces:\n # draw a rectangle around each detected face\n # you may also need to change the width of the rectangle drawn depending on image resolution\n cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3) \n\nfig = plt.figure(figsize=(9,9))\n\nplt.imshow(image_with_detections)",
"_____no_output_____"
]
],
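[
[
"# optional sketch: eye detection inside each detected face box, assuming the\n# standard OpenCV haarcascade_eye.xml is available in detector_architectures/\n# eye_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_eye.xml')\n# for (x,y,w,h) in faces:\n#     face_roi = image_with_detections[y:y+h, x:x+w]\n#     for (ex,ey,ew,eh) in eye_cascade.detectMultiScale(face_roi):\n#         cv2.rectangle(face_roi,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)",
"_____no_output_____"
]
],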
[
[
"## Loading in a trained model\n\nOnce you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.\n\nFirst, load your best model by its filename.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom models import Net\n\nnet = Net()\n\n## TODO: load the best saved model parameters (by your path name)\n## You'll need to un-comment the line below and add the correct name for *your* saved model\n# net.load_state_dict(torch.load('saved_models/keypoints_model_1.pt'))\n\n## print out your net and prepare it for testing (uncomment the line below)\n# net.eval()",
"_____no_output_____"
]
],
[
[
"## Keypoint detection\n\nNow, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.\n\n### TODO: Transform each detected face into an input Tensor\n\nYou'll need to perform the following steps for each detected face:\n1. Convert the face from RGB to grayscale\n2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n4. Reshape the numpy image into a torch image.\n\n**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.\n\nYou may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.\n\n\n### TODO: Detect and display the predicted keypoints\n\nAfter each face has been appropriately converted into an input Tensor for your network to see as input, you can apply your `net` to each face. The ouput should be the predicted the facial keypoints. These keypoints will need to be \"un-normalized\" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:\n\n<img src='images/michelle_detected.png' width=30% height=30%/>\n\n\n",
"_____no_output_____"
]
],
[
[
"image_copy = np.copy(image)\n\n# loop over the detected faces from your haar cascade\nfor (x,y,w,h) in faces:\n \n # Select the region of interest that is the face in the image \n roi = image_copy[y:y+h, x:x+w]\n \n ## TODO: Convert the face region from RGB to grayscale\n\n ## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]\n \n ## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)\n \n ## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)\n \n ## TODO: Make facial keypoint predictions using your loaded, trained network \n\n ## TODO: Display each detected face and the corresponding keypoints \n \n",
"_____no_output_____"
]
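,
[
"## One possible (hedged) implementation sketch of the TODOs above -- the 224x224\n## input size and the *50 + 100 un-normalization are assumptions taken from the\n## Notebook 2 transforms; adjust them to whatever your trained network expects.\n# for (x,y,w,h) in faces:\n#     roi = image_copy[y:y+h, x:x+w]\n#     roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n#     roi_norm = roi_gray / 255.0\n#     roi_resized = cv2.resize(roi_norm, (224, 224))\n#     roi_tensor = torch.from_numpy(roi_resized).float().unsqueeze(0).unsqueeze(0)\n#     keypoints = net(roi_tensor).view(-1, 2).detach().numpy()\n#     keypoints = keypoints * 50.0 + 100  # un-normalize (assumed scaling)\n#     plt.imshow(roi_resized, cmap='gray')\n#     plt.scatter(keypoints[:, 0], keypoints[:, 1], s=20, marker='.', c='m')\n#     plt.show()",
"_____no_output_____"
]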
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d3f5214e3458d07f0beebf4bee1f13bab19853 | 98,628 | ipynb | Jupyter Notebook | Tests/From Scratch/L layer mnist.ipynb | Sushant-ctrl/MNIST_digitClassifier | 92bdc07ac6620a0906650b4af2b17098062f5286 | [
"MIT"
] | 3 | 2020-09-19T08:02:29.000Z | 2020-09-22T18:18:47.000Z | Tests/From Scratch/L layer mnist.ipynb | Sushant-ctrl/MNIST_digitClassifier | 92bdc07ac6620a0906650b4af2b17098062f5286 | [
"MIT"
] | null | null | null | Tests/From Scratch/L layer mnist.ipynb | Sushant-ctrl/MNIST_digitClassifier | 92bdc07ac6620a0906650b4af2b17098062f5286 | [
"MIT"
] | null | null | null | 39.388179 | 9,136 | 0.625289 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport gzip",
"_____no_output_____"
],
[
"#loading the data from the given file\nimage_size = 28\nnum_images = 55000\nf = gzip.open('train-images-idx3-ubyte.gz','r')\n\nf.read(16)\nbuf = f.read(image_size * image_size * num_images)\n\ndata = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)\n\ndata = data.reshape(num_images, image_size, image_size, 1)",
"_____no_output_____"
],
[
"#pritning the images\nimage = np.asarray(data[550]).squeeze()\n\nplt.imshow(image)\nplt.show()",
"_____no_output_____"
],
[
"#storing the data in the form of matrix\nX=np.asarray(data[:])\nX=X.squeeze()\nX=X.reshape(X.shape[0],X.shape[2]*X.shape[1])\nX=X.T/255\nX.shape",
"_____no_output_____"
],
[
"#knowing the no of features and the no of data points in the given array\nm=X.shape[1]\nn=X.shape[0]\nprint(m)\nprint(n)",
"55000\n784\n"
],
[
"#loading the labels\nf = gzip.open('train-labels-idx1-ubyte.gz','r')\nf.read(8)\nY = np.zeros((1,m))\nfor i in range(0,54999): \n buf = f.read(1)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n Y[0,i]=labels\nprint(Y[0,550]) \nprint(Y.shape)",
"9.0\n(1, 55000)\n"
],
[
"Y1= np.zeros((10,m))\nfor i in range (0,m):\n for j in range(0,10):\n if(j==int(Y[0,i])):\n Y1[j,i]=1\n else:\n Y1[j,i]=0\nY=Y1 ",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('Downloads/mnist_train.csv',header = None)\ndata = np.array(df)\nX = (data[:,1:].transpose())/255\nm = X.shape[1]\nn = X.shape[0]\nY_orig = data[:,0:1].transpose()\nY = np.zeros((10,m))\nfor i in range(m): \n Y[int(Y_orig[0,i]),i] = 1",
"_____no_output_____"
]
],
[
[
"def relu(Z):\n result = (Z + np.abs(Z))/2\n return result",
"_____no_output_____"
],
[
"def relu_backward(Z):\n result = (Z + np.abs(Z))/(2*np.abs(Z))\n return result",
"_____no_output_____"
],
[
"def softmax(Z):\n temp = np.exp(Z)\n result = temp/np.sum(temp,axis = 0,keepdims = True)\n return result",
"_____no_output_____"
],
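[
"# numerically stable variant (sketch): subtracting the per-column max before exp\n# avoids overflow for large logits and leaves the result unchanged\ndef softmax_stable(Z):\n    shifted = Z - np.max(Z, axis=0, keepdims=True)\n    temp = np.exp(shifted)\n    return temp/np.sum(temp, axis=0, keepdims=True)",
"_____no_output_____"
],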
[
"def initialize_parameters(layer_dims):\n parameters = {}\n L = len(layer_dims) - 1\n for l in range(1,L + 1):\n parameters[\"W\" + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1])*0.01\n parameters[\"b\" + str(l)] = np.zeros((layer_dims[l],1))\n #print(parameters)\n return parameters",
"_____no_output_____"
],
[
"def forward_prop(X,parameters):\n cache = {}\n \n L = len(layer_dims) - 1\n A_prev = X\n for l in range(1,L):\n Z = parameters[\"W\" + str(l)].dot(A_prev) + parameters[\"b\" + str(l)]\n A = relu(Z)\n cache[\"Z\" + str(l)] = Z\n A_prev = A\n Z = parameters[\"W\" + str(L)].dot(A_prev) + parameters[\"b\" + str(L)]\n AL = softmax(Z)\n cache[\"Z\" + str(L)] = Z\n return AL,cache",
"_____no_output_____"
],
[
"def compute_cost(AL,Y):\n m = AL.shape[1]\n cost = (np.sum(-(Y * np.log(AL))))/(m)\n return cost",
"_____no_output_____"
],
[
"def backward_prop(X,Y,cache,parameters,AL,layer_dims):\n m = X.shape[1]\n dparameters = {}\n L = len(layer_dims) - 1\n dZ = AL - Y\n dparameters[\"dW\" + str(L)] = dZ.dot(relu(cache[\"Z\" + str(L-1)]).transpose())/m\n #dparameters[\"dW\" + str(L)] = dZ.dot(X.transpose())/m\n dparameters[\"db\" + str(L)] = np.sum(dZ,axis = 1,keepdims = True)/m\n for l in range(1,L):\n dZ = ((parameters[\"W\" + str(L-l+1)].transpose()).dot(dZ)) * (relu_backward(cache[\"Z\" + str(L-l)]))\n if L-l-1 != 0:\n dparameters[\"dW\" + str(L-l)] = dZ.dot(relu(cache[\"Z\" + str(L-1-l)]).transpose())/m\n else:\n dparameters[\"dW\" + str(L-l)] = dZ.dot(X.transpose())/m\n dparameters[\"db\" + str(L-l)] = np.sum(dZ,axis = 1,keepdims = True)/m\n return dparameters ",
"_____no_output_____"
],
[
"def update_parameters(parameters,dparameters,layer_dims,learning_rate):\n L = len(layer_dims) - 1\n for l in range(1,L+1):\n parameters[\"W\" + str(l)] = parameters[\"W\" + str(l)] - learning_rate*dparameters[\"dW\" + str(l)]\n parameters[\"b\" + str(l)] = parameters[\"b\" + str(l)] - learning_rate*dparameters[\"db\" + str(l)]\n return parameters",
"_____no_output_____"
],
[
"def model(X,Y,layer_dims,learning_rate,num_iters):\n costs = []\n parameters = initialize_parameters(layer_dims)\n for i in range(num_iters):\n AL,cache = forward_prop(X,parameters)\n cost = compute_cost(AL,Y)\n costs.append(cost)\n dparameters = backward_prop(X,Y,cache,parameters,AL,layer_dims)\n parameters = update_parameters(parameters,dparameters,layer_dims,learning_rate)\n print(i,\"\\t\",cost)\n return parameters,costs",
"_____no_output_____"
],
[
"#trainig\nlayer_dims = [784,120,10]\nparameters,costs = model(X,Y,layer_dims,0.5,2000)",
"0 \t 2.3032718522\n1 \t 2.29865899442\n2 \t 2.2938337029\n3 \t 2.28767836322\n4 \t 2.27912976402\n5 \t 2.26699519023\n6 \t 2.24977560538\n7 \t 2.22544551059\n8 \t 2.19143784975\n9 \t 2.14483139181\n10 \t 2.08282640261\n11 \t 2.00345765165\n12 \t 1.90640314372\n13 \t 1.79363611749\n14 \t 1.669845734\n15 \t 1.54206933712\n16 \t 1.41812718626\n17 \t 1.3042916204\n18 \t 1.20366794148\n19 \t 1.11645876565\n20 \t 1.04129593978\n21 \t 0.976403522064\n22 \t 0.920142392014\n23 \t 0.871140778447\n24 \t 0.828275458354\n25 \t 0.790660949894\n26 \t 0.757709974509\n27 \t 0.729780049365\n28 \t 0.711410854858\n29 \t 0.732917289783\n30 \t 0.90248402669\n31 \t 1.44940347996\n32 \t 1.36081586232\n33 \t 0.996636211535\n34 \t 0.77077740086\n35 \t 0.673642226646\n36 \t 0.619956616566\n37 \t 0.596204940865\n38 \t 0.579970767803\n39 \t 0.568616480664\n40 \t 0.564325269842\n41 \t 0.566733647772\n42 \t 0.587792238357\n43 \t 0.613202106263\n44 \t 0.68200824628\n45 \t 0.664431752173\n46 \t 0.695258323917\n47 \t 0.585266018481\n48 \t 0.553184104169\n49 \t 0.515809239712\n50 \t 0.499273769402\n51 \t 0.485759930432\n52 \t 0.477243477078\n53 \t 0.470803882211\n54 \t 0.46677634359\n55 \t 0.464250266909\n56 \t 0.464091356534\n57 \t 0.46498594045\n58 \t 0.468564116766\n59 \t 0.471968839532\n60 \t 0.477185853299\n61 \t 0.47877735389\n62 \t 0.479919352724\n63 \t 0.472762165676\n64 \t 0.4662307214\n65 \t 0.450921953336\n66 \t 0.441340590594\n67 \t 0.426246380351\n68 \t 0.419238010959\n69 \t 0.408705980111\n70 \t 0.405225866273\n71 \t 0.398621919283\n72 \t 0.397666283601\n73 \t 0.393776393216\n74 \t 0.394691927928\n75 \t 0.393080449906\n76 \t 0.395563225533\n77 \t 0.396323991672\n78 \t 0.399598792455\n79 \t 0.402606980338\n80 \t 0.404268914874\n81 \t 0.407591171637\n82 \t 0.403949395938\n83 \t 0.404323292163\n84 \t 0.394815503614\n85 \t 0.391461422377\n86 \t 0.380693484369\n87 \t 0.37610119448\n88 \t 0.36788662727\n89 \t 0.363963843232\n90 \t 0.358643171566\n91 \t 0.355681398014\n92 \t 0.352209830524\n93 \t 0.349922439865\n94 \t 0.347441863984\n95 \t 0.345564752138\n96 \t 0.343618704914\n97 \t 0.341994774911\n98 \t 0.340345786718\n99 \t 0.338881018397\n100 \t 0.33741437732\n101 \t 0.336059765152\n102 \t 0.334712440764\n103 \t 0.333439288991\n104 \t 0.332176019925\n105 \t 0.330968250284\n106 \t 0.329768827794\n107 \t 0.328611475433\n108 \t 0.327463368513\n109 \t 0.326350888978\n110 \t 0.325245938746\n111 \t 0.324168593696\n112 \t 0.323101171349\n113 \t 0.322058817966\n114 \t 0.321025984152\n115 \t 0.320015790284\n116 \t 0.31901372692\n117 \t 0.318030785837\n118 \t 0.317057336395\n119 \t 0.316102025946\n120 \t 0.315154560864\n121 \t 0.314222097039\n122 \t 0.313297868001\n123 \t 0.31238669593\n124 \t 0.311483368081\n125 \t 0.310593465141\n126 \t 0.309710716279\n127 \t 0.308839503422\n128 \t 0.307976004541\n129 \t 0.30712264137\n130 \t 0.30627588911\n131 \t 0.305439177201\n132 \t 0.304608167258\n133 \t 0.303785907755\n134 \t 0.302971199155\n135 \t 0.302165501455\n136 \t 0.301365513145\n137 \t 0.300574077385\n138 \t 0.299788534078\n139 \t 0.299010968901\n140 \t 0.298237998862\n141 \t 0.29747389405\n142 \t 0.296714551874\n143 \t 0.295962954209\n144 \t 0.295216183745\n145 \t 0.294476606743\n146 \t 0.293742250088\n147 \t 0.293014745335\n148 \t 0.292292673064\n149 \t 0.291577706786\n150 \t 0.290867976186\n151 \t 0.290164515734\n152 \t 0.289465650026\n153 \t 0.288771842443\n154 \t 0.288083023146\n155 \t 0.287399068199\n156 \t 0.286719405637\n157 \t 0.286044498587\n158 \t 0.285374175008\n159 \t 0.284708461089\n160 \t 0.284047300808\n161 
\t 0.283390134122\n162 \t 0.282737261463\n163 \t 0.282088673534\n164 \t 0.281443696846\n165 \t 0.280802429294\n166 \t 0.280165946321\n167 \t 0.279534232483\n168 \t 0.278906333911\n169 \t 0.278282346163\n170 \t 0.277662177995\n171 \t 0.277045861463\n172 \t 0.276433100532\n173 \t 0.275824510815\n174 \t 0.275219206561\n175 \t 0.274617391109\n176 \t 0.274019022271\n177 \t 0.27342400527\n178 \t 0.272832240711\n179 \t 0.272243673788\n180 \t 0.271658282579\n181 \t 0.271075370418\n182 \t 0.270495488916\n183 \t 0.26991816993\n184 \t 0.269343825409\n185 \t 0.268771661036\n186 \t 0.268202160851\n187 \t 0.267635892151\n188 \t 0.267072896594\n189 \t 0.266512467346\n190 \t 0.265953822758\n191 \t 0.265397996126\n192 \t 0.26484491936\n193 \t 0.264294473305\n194 \t 0.263747295031\n195 \t 0.263202910622\n196 \t 0.262661596945\n197 \t 0.262122822603\n198 \t 0.261586673952\n199 \t 0.26105295374\n200 \t 0.260521964202\n201 \t 0.25999387698\n202 \t 0.25946853384\n203 \t 0.258946382827\n204 \t 0.258427045086\n205 \t 0.257910358424\n206 \t 0.257395901732\n207 \t 0.256883955564\n208 \t 0.256374449128\n209 \t 0.25586684128\n210 \t 0.255361334208\n211 \t 0.254858158664\n212 \t 0.254357609554\n213 \t 0.25385880785\n214 \t 0.253361971226\n215 \t 0.252867849736\n216 \t 0.252375734108\n217 \t 0.251885815052\n218 \t 0.251398221477\n219 \t 0.250912469596\n220 \t 0.250428713738\n221 \t 0.249946726123\n222 \t 0.249466752344\n223 \t 0.248988591724\n224 \t 0.248512290952\n225 \t 0.24803805619\n226 \t 0.247565800178\n227 \t 0.247096034359\n228 \t 0.246629043272\n229 \t 0.24616454275\n230 \t 0.245702273912\n231 \t 0.245241497789\n232 \t 0.244782735189\n233 \t 0.244326264781\n234 \t 0.243871776937\n235 \t 0.24341869371\n236 \t 0.242967238833\n237 \t 0.242517810939\n238 \t 0.242070419053\n239 \t 0.241624931752\n240 \t 0.241180939768\n241 \t 0.240738626457\n242 \t 0.240297969416\n243 \t 0.239859297007\n244 \t 0.23942171705\n245 \t 0.238985460772\n246 \t 0.238550759533\n247 \t 0.238117851921\n248 \t 0.237686606005\n249 \t 0.237257024406\n250 \t 0.236829593541\n251 \t 0.236403615396\n252 \t 0.235979709622\n253 \t 0.235557197209\n254 \t 0.235135950255\n255 \t 0.234716184712\n256 \t 0.234298111797\n257 \t 0.23388164155\n258 \t 0.233467052489\n259 \t 0.233054001674\n260 \t 0.232642191486\n261 \t 0.232231526611\n262 \t 0.231822394877\n263 \t 0.231414988964\n264 \t 0.231009629783\n265 \t 0.230606293771\n266 \t 0.230204837934\n267 \t 0.22980473498\n268 \t 0.229406205134\n269 \t 0.229009203801\n270 \t 0.228613844917\n271 \t 0.228219945252\n272 \t 0.227827342558\n273 \t 0.227436045852\n274 \t 0.227046099337\n275 \t 0.226657812252\n276 \t 0.226271137769\n277 \t 0.225885620439\n278 \t 0.225501275147\n279 \t 0.225118394784\n280 \t 0.224736979617\n281 \t 0.224357146481\n282 \t 0.223978471658\n283 \t 0.223600648914\n284 \t 0.223223707926\n285 \t 0.222848145383\n286 \t 0.222473697258\n287 \t 0.222100568147\n288 \t 0.221728913177\n289 \t 0.221358708215\n290 \t 0.22098969687\n291 \t 0.22062188797\n292 \t 0.220254974587\n293 \t 0.219889248491\n294 \t 0.219524965801\n295 \t 0.219162102282\n296 \t 0.218800409535\n297 \t 0.218439808958\n298 \t 0.218080524494\n299 \t 0.217722588936\n300 \t 0.21736567845\n301 \t 0.217009827364\n302 \t 0.216655354273\n303 \t 0.216301797928\n304 \t 0.215949278739\n305 \t 0.215597798451\n306 \t 0.215247558886\n307 \t 0.214898474893\n308 \t 0.214550869166\n309 \t 0.214204451004\n310 \t 0.213859563104\n311 \t 0.213515978987\n312 \t 0.213173303\n313 \t 0.212831868615\n314 \t 0.212491325979\n315 \t 0.212151862981\n316 \t 
0.211813260186\n317 \t 0.211475361161\n318 \t 0.211138344032\n319 \t 0.210802295121\n320 \t 0.210467386299\n321 \t 0.210133054773\n322 \t 0.209799434142\n323 \t 0.209466936866\n324 \t 0.209135201324\n325 \t 0.208804376067\n326 \t 0.208474631177\n327 \t 0.208145920693\n328 \t 0.20781836192\n329 \t 0.207491508281\n330 \t 0.207165663136\n331 \t 0.206840757146\n332 \t 0.20651662599\n333 \t 0.206193286312\n334 \t 0.205870914729\n335 \t 0.205549448766\n336 \t 0.205228734848\n337 \t 0.204908971288\n338 \t 0.204590261957\n339 \t 0.204272407627\n340 \t 0.20395540419\n341 \t 0.203639396621\n342 \t 0.203324599809\n343 \t 0.203010600702\n344 \t 0.202697506335\n345 \t 0.202385379281\n346 \t 0.20207446437\n347 \t 0.20176460365\n348 \t 0.201455925162\n349 \t 0.201147961025\n350 \t 0.200840948056\n351 \t 0.200534702578\n352 \t 0.200229209024\n353 \t 0.199924798127\n354 \t 0.199621200789\n355 \t 0.199318529814\n356 \t 0.199016958181\n357 \t 0.198716463449\n358 \t 0.198416660422\n359 \t 0.198117512689\n360 \t 0.197818981539\n361 \t 0.197521361551\n362 \t 0.197224449598\n363 \t 0.196928143543\n364 \t 0.196632794419\n365 \t 0.196338534435\n366 \t 0.196044880924\n367 \t 0.195752132914\n368 \t 0.195460070133\n369 \t 0.195168816037\n370 \t 0.194878242708\n371 \t 0.194588578863\n372 \t 0.194299515668\n373 \t 0.194011370065\n374 \t 0.193723999323\n375 \t 0.193437253407\n376 \t 0.193151028085\n377 \t 0.192865538282\n378 \t 0.192580770907\n379 \t 0.192296781658\n380 \t 0.192013584212\n381 \t 0.191731281307\n382 \t 0.191449982315\n383 \t 0.191169652089\n384 \t 0.190890580152\n385 \t 0.190612295452\n386 \t 0.190334826579\n387 \t 0.190058077281\n388 \t 0.189781877742\n389 \t 0.189506226613\n390 \t 0.189231024096\n391 \t 0.188956531026\n392 \t 0.188682652582\n393 \t 0.188409647127\n394 \t 0.18813722057\n395 \t 0.187865186668\n396 \t 0.187593902957\n397 \t 0.187323232885\n398 \t 0.187053529382\n"
],
[
"plt.plot(costs)",
"_____no_output_____"
],
[
"#training\ndf = pd.read_csv('mnist_test.csv',header = None)\ndata = np.array(df)\nX_test = (data[:,1:].transpose())/255\nY_test = data[:,0:1].transpose()\naccuracy = 0\nm_test = X_test.shape[1]\npredict = np.zeros((1,m_test))\nA_test,cache = forward_prop(X_test,parameters)\nfor i in range(m_test):\n max = 0\n for j in range(10):\n if A_test[j,i] > max:\n max = A_test[j,i]\n max_index = j\n predict[0,i] = max_index\n if predict[0,i] == Y_test[0,i]:\n accuracy = accuracy + 1\naccuracy = (accuracy/m_test)*100\nprint(accuracy,\"%\")\nindex = 0",
"97.63 %\n"
],
[
"#change index toview different examples\nindex = 897\nprint(\"Its a\",int(predict[0,index]))\nplt.imshow(X_test[:,index].reshape(28,28))",
"Its a 5\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d3fc2518dd81eb41674d64db56e2c156908251 | 9,096 | ipynb | Jupyter Notebook | community/en/pdes.ipynb | thezwick/examples | baa164aab116c4110315bcfd50a572fee1c55ee6 | [
"Apache-2.0"
] | 3 | 2021-02-02T15:56:47.000Z | 2021-04-08T14:05:54.000Z | community/en/pdes.ipynb | thezwick/examples | baa164aab116c4110315bcfd50a572fee1c55ee6 | [
"Apache-2.0"
] | 7 | 2020-11-13T18:56:38.000Z | 2022-03-12T00:37:46.000Z | community/en/pdes.ipynb | thezwick/examples | baa164aab116c4110315bcfd50a572fee1c55ee6 | [
"Apache-2.0"
] | 8 | 2021-05-01T04:50:58.000Z | 2021-05-01T07:57:04.000Z | 28.248447 | 283 | 0.469437 | [
[
[
"##### Copyright 2019 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Partial Differential Equations",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/pdes.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/community/en/pdes.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"TensorFlow isn't just for machine learning. Here you will use TensorFlow to simulate the behavior of a [partial differential equation](https://en.wikipedia.org/wiki/Partial_differential_equation). You'll simulate the surface of square pond as a few raindrops land on it.\n\n## Basic setup\n\nA few imports you'll need.",
"_____no_output_____"
]
],
[
[
"#Import libraries for simulation\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\nimport numpy as np\n\n#Imports for visualization\nimport PIL.Image\nfrom io import BytesIO\nfrom IPython.display import clear_output, Image, display\n",
"_____no_output_____"
]
],
[
[
"A function for displaying the state of the pond's surface as an image.",
"_____no_output_____"
]
],
[
[
"def DisplayArray(a, fmt='jpeg', rng=[0,1]):\n \"\"\"Display an array as a picture.\"\"\"\n a = (a - rng[0])/float(rng[1] - rng[0])*255\n a = np.uint8(np.clip(a, 0, 255))\n f = BytesIO()\n PIL.Image.fromarray(a).save(f, fmt)\n clear_output(wait = True)\n display(Image(data=f.getvalue()))",
"_____no_output_____"
]
],
[
[
"## Computational convenience functions",
"_____no_output_____"
]
],
[
[
"@tf.function\ndef make_kernel(a):\n \"\"\"Transform a 2D array into a convolution kernel\"\"\"\n a = np.asarray(a)\n a = a.reshape(list(a.shape) + [1,1])\n return tf.constant(a, dtype=1)\n\[email protected]\ndef simple_conv(x, k):\n \"\"\"A simplified 2D convolution operation\"\"\"\n x = tf.expand_dims(tf.expand_dims(x, 0), -1)\n y = tf.nn.depthwise_conv2d(input=x, filter=k, strides=[1, 1, 1, 1], padding='SAME')\n return y[0, :, :, 0]\n\[email protected]\ndef laplace(x):\n \"\"\"Compute the 2D laplacian of an array\"\"\"\n laplace_k = make_kernel([[0.5, 1.0, 0.5],\n [1.0, -6., 1.0],\n [0.5, 1.0, 0.5]])\n return simple_conv(x, laplace_k)",
"_____no_output_____"
]
],
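[
[
"# sketch: sanity-check the laplacian -- on a constant field its interior should be ~0\n# (edge cells are affected by the zero padding of the SAME convolution)\nlap = laplace(tf.constant(np.ones([8, 8], dtype=np.float32))).numpy()\nprint(np.abs(lap[1:-1, 1:-1]).max())  # expected ~0",
"_____no_output_____"
]
],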
[
[
"## Define the PDE\n\nYour pond is a perfect 500 x 500 square, as is the case for most ponds found in nature.",
"_____no_output_____"
]
],
[
[
"N = 500",
"_____no_output_____"
]
],
[
[
"\nHere you create your pond and hit it with some rain drops.",
"_____no_output_____"
]
],
[
[
"# Initial Conditions -- some rain drops hit a pond\n\n# Set everything to zero\nu_init = np.zeros([N, N], dtype=np.float32)\nut_init = np.zeros([N, N], dtype=np.float32)\n\n# Some rain drops hit a pond at random points\nfor n in range(40):\n a,b = np.random.randint(0, N, 2)\n u_init[a,b] = np.random.uniform()\n\nDisplayArray(u_init, rng=[-0.1, 0.1])",
"_____no_output_____"
]
],
[
[
"Now let's specify the details of the differential equation.",
"_____no_output_____"
]
],
[
[
"# Parameters:\n# eps -- time resolution\n# damping -- wave damping\neps = 0.03\ndamping = 0.04\n\n# Create variables for simulation state\nU = tf.Variable(u_init)\nUt = tf.Variable(ut_init)",
"_____no_output_____"
]
],
[
[
"## Run the simulation\n\nThis is where it gets fun -- running time forward with a simple for loop.",
"_____no_output_____"
]
],
[
[
"# Run 1000 steps of PDE\nfor i in range(1000):\n # Step simulation\n # Discretized PDE update rules\n U = U + eps * Ut\n Ut = Ut + eps * (laplace(U) - damping * Ut)\n\n# Show final image\nDisplayArray(U.numpy(), rng=[-0.1, 0.1])",
"_____no_output_____"
]
],
[
[
"Look! Ripples!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d4133afbca60d38b776b004f110fd8f34d74a3 | 35,069 | ipynb | Jupyter Notebook | Week 3/numpy.ipynb | mubtasimfuad/ML_AiS_B1 | be1c77c8be01765603b41deb9894cfc28cdf4433 | [
"Apache-2.0"
] | 1 | 2021-04-23T08:59:12.000Z | 2021-04-23T08:59:12.000Z | Week 3/numpy.ipynb | mubtasimfuad/ML_AiS_B1 | be1c77c8be01765603b41deb9894cfc28cdf4433 | [
"Apache-2.0"
] | null | null | null | Week 3/numpy.ipynb | mubtasimfuad/ML_AiS_B1 | be1c77c8be01765603b41deb9894cfc28cdf4433 | [
"Apache-2.0"
] | 1 | 2021-04-23T08:59:16.000Z | 2021-04-23T08:59:16.000Z | 23.178453 | 175 | 0.360318 | [
[
[
"# NumPy\nNumpy is the core library for scientific computing in Python. <br/>\nIt provides a high-performance multidimensional array object, and tools for working with these arrays. <br/>\nOfficial NumPy Documentation: https://numpy.org/doc/stable/reference/ ",
"_____no_output_____"
]
],
[
[
"# Install NumPy \n# ! pip install numpy",
"_____no_output_____"
]
],
[
[
"Since NumPy is not a default thing in Python. We import this library. When we import a library we allow all the functions and types with the initial of that library.",
"_____no_output_____"
]
],
[
[
"# Import NumPy\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"# NumPy Arrays\nA grid of values, all of the same type. <br/>\n**Rank:** number of dimensions of the array <br/>\n**Shape:** an array of tuple of integers giving the size of the array along each dimension.",
"_____no_output_____"
]
],
[
[
"# Rank 1 array\na = np.array([1, 2, 3]) \nprint(type(a)) # Prints data type",
"<class 'numpy.ndarray'>\n"
],
[
"print(a.shape)",
"(3,)\n"
],
[
"print(a[0], a[1], a[2]) # Indexing\na[0] = 5 # Assigning\nprint(a) ",
"1 2 3\n[5 2 3]\n"
],
[
"# Rank 2 array\nb = np.array([ [1,2,3],\n [4,5,6] \n ])\n'''\n# of elements in first 3rd bracket => 2\n# of elements in second 3rd bracket => 3\n'''\n\nprint(b.shape) \nprint(b[0, 0], b[0, 1], b[1, 0], b[1,2])",
"(2, 3)\n1 2 4 6\n"
]
],
[
[
"## Special Arrays",
"_____no_output_____"
]
],
[
[
"a = np.zeros((6,4)) # Create an array of all zeros\na ",
"_____no_output_____"
],
[
"np.zeros_like(b,dtype=float)",
"_____no_output_____"
],
[
"b = np.ones((3,2)) # Create an array of all ones\nb ",
"_____no_output_____"
],
[
"c = np.full((6,4), 7) # Create a constant array\nc ",
"_____no_output_____"
],
[
"d = np.eye(5) # Create a 2x2 identity matrix\nd ",
"_____no_output_____"
],
[
"e = np.random.random((4,3)) # Create an array filled with random values\ne",
"_____no_output_____"
]
],
[
[
"## Indexing",
"_____no_output_____"
]
],
[
[
"a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\na",
"_____no_output_____"
],
[
"a[:2,:3]",
"_____no_output_____"
],
[
"b = a[:2, 1:3]\nb",
"_____no_output_____"
],
[
"print(a[0, 1]) # Prints \"2\"\nb[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]\nprint(a[0, 1]) # Prints \"77\"",
"2\n77\n"
],
[
"a[1, :]",
"_____no_output_____"
],
[
"a[1:2, :]",
"_____no_output_____"
],
[
"a[:, 1]",
"_____no_output_____"
],
[
"a[:, 1:2]",
"_____no_output_____"
],
[
"np.arange(2,10,2)",
"_____no_output_____"
]
],
[
[
"## Boolean array indexing",
"_____no_output_____"
]
],
[
[
"a",
"_____no_output_____"
],
[
"bool_idx = (a>10)\nbool_idx",
"_____no_output_____"
],
[
"a[bool_idx]",
"_____no_output_____"
],
[
"a [ a>10 ]",
"_____no_output_____"
]
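,
[
"# sketch: boolean masks combine element-wise with & and | (note the parentheses)\na[(a > 3) & (a < 10)]",
"_____no_output_____"
]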
],
[
[
"# Data Types",
"_____no_output_____"
]
],
[
[
"x = np.array([1, 2]) \nprint(x.dtype) ",
"int64\n"
],
[
"x = np.array([1.0, 2.0])\nprint(x.dtype)",
"float64\n"
],
[
"x = np.array([1, 2], dtype=np.float64) # Foring a particular datatype\nprint(x,x.dtype) ",
"[1. 2.] float64\n"
],
[
"x.dtype",
"_____no_output_____"
]
],
[
[
"# Operations",
"_____no_output_____"
]
],
[
[
"x = np.array([[1,2],[3,4]], dtype=np.float64)\ny = np.array([[5,6],[7,8]], dtype=np.float64)\n\nx,y",
"_____no_output_____"
],
[
"# Adding two arrays element-wise\nprint(x + y)\nprint(np.add(x, y))",
"[[ 6. 8.]\n [10. 12.]]\n[[ 6. 8.]\n [10. 12.]]\n"
],
[
"# Substracting two arrays element-wise\nprint(x - y)\nprint(np.subtract(x, y))",
"[[-4. -4.]\n [-4. -4.]]\n[[-4. -4.]\n [-4. -4.]]\n"
],
[
"# Mutiplication Element-wise\nprint(x * y)\nprint(np.multiply(x, y))",
"[[ 5. 12.]\n [21. 32.]]\n[[ 5. 12.]\n [21. 32.]]\n"
],
[
"# Elementwise division\nprint(x / y)\nprint(np.divide(x, y))",
"[[0.2 0.33333333]\n [0.42857143 0.5 ]]\n[[0.2 0.33333333]\n [0.42857143 0.5 ]]\n"
],
[
"# Elementwise square root\nprint(np.sqrt(x))",
"[[1. 1.41421356]\n [1.73205081 2. ]]\n"
],
[
"# Matrix Multiplication\nprint(x.dot(y))\nprint(np.dot(x, y))",
"[[19. 22.]\n [43. 50.]]\n[[19. 22.]\n [43. 50.]]\n"
],
[
"x",
"_____no_output_____"
],
[
"# Sum of all elements in the array\nnp.sum(x)",
"_____no_output_____"
],
[
"print(np.sum(x, axis=0)) # Compute sum of each column\nprint(np.sum(x, axis=1)) # Compute sum of each row",
"[4. 6.]\n[3. 7.]\n"
],
[
"a",
"_____no_output_____"
],
[
"# Transpose\na.T",
"_____no_output_____"
]
],
[
[
"# Broadcasting",
"_____no_output_____"
]
],
[
[
"x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])\nv = np.array([1, 0, 1])\ny = x + v # Add v to each row of x using broadcasting\nprint(y)",
"[[ 2 2 4]\n [ 5 5 7]\n [ 8 8 10]\n [11 11 13]]\n"
],
[
"x = np.array([[1,2,3], [4,5,6]])\ny = np.array([4,5])\n(x.T+y).T",
"_____no_output_____"
],
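[
"# equivalent (sketch): adding a length-1 axis lets y broadcast across columns directly\nx + y[:, np.newaxis]",
"_____no_output_____"
],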
[
"x, x.shape",
"_____no_output_____"
],
[
"x.T, x.T.shape",
"_____no_output_____"
],
[
"y, y.shape",
"_____no_output_____"
],
[
"x.T+y",
"_____no_output_____"
],
[
"(x.T+y).T",
"_____no_output_____"
],
[
"x*2",
"_____no_output_____"
],
[
"x+2",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d42a2038f2e270620ceec2c3aec10bbee419ec | 434,332 | ipynb | Jupyter Notebook | docs/tutorials/backtest.ipynb | goncaloperes/orbit | 1d38fcab69ffa9b7ceb4fadfd26aa42d6f331c14 | [
"Apache-2.0"
] | 1 | 2021-11-26T00:34:08.000Z | 2021-11-26T00:34:08.000Z | docs/tutorials/backtest.ipynb | ChakChak1234/orbit | b329326b8fd9382310645927846315714386de50 | [
"Apache-2.0"
] | null | null | null | docs/tutorials/backtest.ipynb | ChakChak1234/orbit | b329326b8fd9382310645927846315714386de50 | [
"Apache-2.0"
] | null | null | null | 247.060296 | 286,588 | 0.902982 | [
[
[
"# Backtest Orbit Model\n\nIn this section, we will cover:\n\n- How to create a TimeSeriesSplitter\n- How to create a BackTester and retrieve the backtesting results\n- How to leverage the backtesting to tune the hyper-paramters for orbit models",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport orbit\nfrom orbit.models import LGT, DLT\nfrom orbit.diagnostics.backtest import BackTester, TimeSeriesSplitter\nfrom orbit.diagnostics.plot import plot_bt_predictions\nfrom orbit.diagnostics.metrics import smape, wmape\nfrom orbit.utils.dataset import load_iclaims\n\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"print(orbit.__version__)",
"1.1.0dev\n"
],
[
"# load log-transformed data\ndata = load_iclaims()",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
]
],
[
[
"The way to gauge the performance of a time-series model is through re-training models with different historic periods and check their forecast within certain steps. This is similar to a time-based style cross-validation. More often, we called it `backtest` in time-series modeling.\n\nThe purpose of this notebook is to illustrate how to `backtest` a single model using `BackTester`\n\n`BackTester` will compose a `TimeSeriesSplitter` within it, but `TimeSeriesSplitter` is useful as a standalone, in case there are other tasks to perform that requires splitting but not backtesting. `TimeSeriesSplitter` implemented each 'slices' as genertor, i.e it can be used in a for loop. You can also retrieve the composed `TimeSeriesSplitter` object from `BackTester` to utilize the additional methods in `TimeSeriesSplitter`\n\nCurrently, there are two schemes supported for the back-testing engine: expanding window and rolling window.\n\n* **expanding window**: for each back-testing model training, the train start date is fixed, while the train end date is extended forward.\n* **rolling window**: for each back-testing model training, the training window length is fixed but the window is moving forward.",
"_____no_output_____"
],
[
"## Create a TimeSeriesSplitter",
"_____no_output_____"
],
[
"There two main way to splitting a timeseries: expanding and rolling. Expanding window has a fixed starting point, and the window length grows as we move forward in timeseries. It is useful when we want to incoporate all historical information. On the other hand, rolling window has a fixed window length, and the starting point of the window moves forward as we move forward in timeseries. Now, we will illustrate how to use `TimeSeriesSplitter` to split the claims timeseries.",
"_____no_output_____"
],
[
"### Expanding window",
"_____no_output_____"
]
],
[
[
"# configs\nmin_train_len = 380 # minimal length of window length\nforecast_len = 20 # length forecast window\nincremental_len = 20 # step length for moving forward",
"_____no_output_____"
],
[
"ex_splitter = TimeSeriesSplitter(df=data,\n min_train_len=min_train_len,\n incremental_len=incremental_len,\n forecast_len=forecast_len, \n window_type='expanding',\n date_col='week')",
"_____no_output_____"
],
[
"print(ex_splitter)",
"\n------------ Fold: (1 / 3)------------\nTrain start index: 0 Train end index: 379\nTest start index: 380 Test end index: 399\nTrain start date: 2010-01-03 00:00:00 Train end date: 2017-04-09 00:00:00\nTest start date: 2017-04-16 00:00:00 Test end date: 2017-08-27 00:00:00\n\n------------ Fold: (2 / 3)------------\nTrain start index: 0 Train end index: 399\nTest start index: 400 Test end index: 419\nTrain start date: 2010-01-03 00:00:00 Train end date: 2017-08-27 00:00:00\nTest start date: 2017-09-03 00:00:00 Test end date: 2018-01-14 00:00:00\n\n------------ Fold: (3 / 3)------------\nTrain start index: 0 Train end index: 419\nTest start index: 420 Test end index: 439\nTrain start date: 2010-01-03 00:00:00 Train end date: 2018-01-14 00:00:00\nTest start date: 2018-01-21 00:00:00 Test end date: 2018-06-03 00:00:00\n\n"
]
],
[
[
"We can visualize the splits, green is training window and yellow it the forecasting windown. The starting point is always 0 for three splits but window length increases from 380 to 420. ",
"_____no_output_____"
]
],
[
[
"_ = ex_splitter.plot()",
"_____no_output_____"
]
],
[
[
"### Rolling window",
"_____no_output_____"
]
],
[
[
"# configs\nmin_train_len = 380 # in case of rolling window, this specify the length of window length\nforecast_len = 20 # length forecast window\nincremental_len = 20 # step length for moving forward",
"_____no_output_____"
],
[
"roll_splitter = TimeSeriesSplitter(data, \n min_train_len=min_train_len, \n incremental_len=incremental_len, \n forecast_len=forecast_len, \n window_type='rolling', date_col='week')",
"_____no_output_____"
]
],
[
[
"We can visualize the splits, green is training window and yellow it the forecasting windown. The window length is always 380, while the starting point moves forward 20 weeks each steps. ",
"_____no_output_____"
]
],
[
[
"_ = roll_splitter.plot()",
"_____no_output_____"
]
],
[
[
"### Specifying number of splits",
"_____no_output_____"
],
[
"User can also define number of splits using `n_splits` instead of specifying minimum training length. That way, minimum training length will be automatically calculated.",
"_____no_output_____"
]
],
[
[
"ex_splitter2 = TimeSeriesSplitter(data, \n min_train_len=min_train_len, \n incremental_len=incremental_len, \n forecast_len=forecast_len, \n n_splits=5, \n window_type='expanding', date_col='week')",
"_____no_output_____"
],
[
"_ = ex_splitter2.plot()",
"_____no_output_____"
]
],
[
[
"### TimeSeriesSplitter as generator",
"_____no_output_____"
],
[
"`TimeSeriesSplitter` is implemented as a genetor, therefore we can call `split()` to loop through it. It comes handy even for tasks other than backtest. ",
"_____no_output_____"
]
],
[
[
" for train_df, test_df, scheme, key in roll_splitter.split():\n print('Initial Claim slice {} rolling mean:{:.3f}'.format(key, train_df['claims'].mean()))",
"Initial Claim slice 0 rolling mean:12.712\nInitial Claim slice 1 rolling mean:12.671\nInitial Claim slice 2 rolling mean:12.647\n"
]
],
[
[
"## Create a BackTester",
"_____no_output_____"
],
[
"Now, we are ready to do backtest, first let's initialize a `DLT` model and a `BackTester`. You pass in `TimeSeriesSplitter` parameters to `BackTester`. ",
"_____no_output_____"
]
],
[
[
"# instantiate a model\ndlt = DLT(\n date_col='week',\n response_col='claims',\n regressor_col=['trend.unemploy', 'trend.filling', 'trend.job'],\n seasonality=52,\n estimator='stan-map',\n)",
"_____no_output_____"
],
[
"# configs\nmin_train_len = 100 \nforecast_len = 20 \nincremental_len = 100 \nwindow_type = 'expanding'\n\nbt = BackTester(\n model=dlt,\n df=data,\n min_train_len=min_train_len,\n incremental_len=incremental_len,\n forecast_len=forecast_len,\n window_type=window_type,\n)",
"_____no_output_____"
]
],
[
[
"## Backtest fit and predict\n\nThe most expensive portion of backtesting is fitting the model iteratively. Thus, we separate the api calls for `fit_predict` and `score` to avoid redundant computation for multiple metrics or scoring methods",
"_____no_output_____"
]
],
[
[
"bt.fit_predict()",
"INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_b0a449c7523386cce384e92a998e71c8 NOW.\n"
]
],
[
[
"Once `fit_predict()` is called, the fitted models and predictions can be easily retrieved from `BackTester`. Here the data is grouped by the date, split_key, and whether or not that observation is part of the training or test data",
"_____no_output_____"
]
],
[
[
"predicted_df = bt.get_predicted_df()\npredicted_df.head()",
"_____no_output_____"
]
],
[
[
"We also provide a plotting utility to visualize the predictions against the actuals for each split.",
"_____no_output_____"
]
],
[
[
"plot_bt_predictions(predicted_df, metrics=smape, ncol=2, include_vline=True);",
"_____no_output_____"
]
],
[
[
"Users might find this useful for any custom computations that may need to be performed on the set of predicted data. Note that the columns are renamed to generic and consistent names.",
"_____no_output_____"
],
[
"Sometimes, it might be useful to match the data back to the original dataset for ad-hoc diagnostics. This can easily be done by merging back to the orignal dataset",
"_____no_output_____"
]
],
[
[
"predicted_df.merge(data, left_on='date', right_on='week')",
"_____no_output_____"
]
],
[
[
"## Backtest Scoring\n\nThe main purpose of `BackTester` are the evaluation metrics. Some of the most widely used metrics are implemented and built into the `BackTester` API.\n\nThe default metric list is **smape, wmape, mape, mse, mae, rmsse**.",
"_____no_output_____"
]
],
[
[
"bt.score()",
"_____no_output_____"
]
],
[
[
"It is possible to filter for only specific metrics of interest, or even implement your own callable and pass into the `score()` method. For example, see this function that uses last observed value as a predictor and computes the `mse`. Or `naive_error` which computes the error as the delta between predicted values and the training period mean. \n\nNote these are not really useful error metrics, just showing some examples of callables you can use ;)",
"_____no_output_____"
]
],
[
[
"def mse_naive(test_actual):\n actual = test_actual[1:]\n predicted = test_actual[:-1]\n return np.mean(np.square(actual - predicted))\n\ndef naive_error(train_actual, test_predicted):\n train_mean = np.mean(train_actual)\n return np.mean(np.abs(test_predicted - train_mean))",
"_____no_output_____"
],
[
"bt.score(metrics=[mse_naive, naive_error])",
"_____no_output_____"
]
],
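[
[
"# sketch: scoring can also be restricted to a subset of the built-in metrics,\n# e.g. the smape/wmape callables imported at the top of this notebook\nbt.score(metrics=[smape, wmape])",
"_____no_output_____"
]
],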
[
[
"It doesn't take additional time to refit and predict the model, since the results are stored when `fit_predict()` is called. Check docstrings for function criteria that is required for it to be supported with this api.",
"_____no_output_____"
],
[
"In some cases, we may want to evaluate our metrics on both train and test data. To do this you can call score again with the following indicator",
"_____no_output_____"
]
],
[
[
"bt.score(include_training_metrics=True)",
"_____no_output_____"
]
],
[
[
"## Backtest Get Models\n\nIn cases where `BackTester` doesn't cut it or for more custom use-cases, there's an interface to export the `TimeSeriesSplitter` and predicted data, as shown earlier. It's also possible to get each of the fitted models for deeper diving",
"_____no_output_____"
]
],
[
[
"fitted_models = bt.get_fitted_models()",
"_____no_output_____"
],
[
"model_1 = fitted_models[0]\nmodel_1.get_regression_coefs()",
"_____no_output_____"
]
],
[
[
"BackTester composes a TimeSeriesSplitter within it, but TimeSeriesSplitter can also be created on its own as a standalone object. See section below on TimeSeriesSplitter for more details on how to use the splitter.\n\nAll of the additional TimeSeriesSplitter args can also be passed into BackTester on instantiation",
"_____no_output_____"
]
],
[
[
"ts_splitter = bt.get_splitter()\n_ = ts_splitter.plot()",
"_____no_output_____"
]
],
[
[
"## Hyperparameter Tunning\nAfter seeing the results fromt the backtest, users may wish to fine tune the hyperparmeters. Orbit also provide a `grid_search_orbit` utilities for parameter searching. It uses `Backtester` under the hood so users can compare backtest metrics for different paramters combination.",
"_____no_output_____"
]
],
[
[
"from orbit.utils.params_tuning import grid_search_orbit",
"_____no_output_____"
],
[
"# defining the search space for level smoothing paramter and seasonality smooth paramter\nparam_grid = {\n 'level_sm_input': [0.3, 0.5, 0.8], \n 'seasonality_sm_input': [0.3, 0.5, 0.8],\n}",
"_____no_output_____"
],
[
"# configs\nmin_train_len = 380 # in case of rolling window, this specify the length of window length\nforecast_len = 20 # length forecast window\nincremental_len = 20 # step length for moving forward\nbest_params, tuned_df = grid_search_orbit(param_grid, \n model=dlt, \n df=data,\n min_train_len=min_train_len, \n incremental_len=incremental_len, \n forecast_len=forecast_len, \n metrics=None, criteria=None, verbose=True)",
"\r 0%| | 0/9 [00:00<?, ?it/s]"
],
[
"tuned_df.head() # backtest output for each parameter searched",
"_____no_output_____"
],
[
"best_params # output best parameters",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0d430f3547f3eff14cdf4dbc0d35b28aef348f8 | 36,474 | ipynb | Jupyter Notebook | notebooks/Make_Project_Geometric_Elementary_Moment_Tensor_Reciprocity.ipynb | code-cullison/pyaspect | ee06a6c33877c5c43c7b8875d4a8b3f307fa6ab1 | [
"Apache-2.0"
] | null | null | null | notebooks/Make_Project_Geometric_Elementary_Moment_Tensor_Reciprocity.ipynb | code-cullison/pyaspect | ee06a6c33877c5c43c7b8875d4a8b3f307fa6ab1 | [
"Apache-2.0"
] | null | null | null | notebooks/Make_Project_Geometric_Elementary_Moment_Tensor_Reciprocity.ipynb | code-cullison/pyaspect | ee06a6c33877c5c43c7b8875d4a8b3f307fa6ab1 | [
"Apache-2.0"
] | 1 | 2021-12-23T03:32:06.000Z | 2021-12-23T03:32:06.000Z | 33.960894 | 129 | 0.518698 | [
[
[
"# Gemetric Test of Reciprocity Moment Tensors",
"_____no_output_____"
],
[
"### Step 0\n\nLoad packages",
"_____no_output_____"
]
],
[
[
"#load all packages\nimport datetime\nimport pickle\nimport copy\nimport os\n\nfrom sys import argv\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pyvista as pv\nimport matplotlib.pyplot as plt \nfrom matplotlib.colors import Normalize\n\n\nfrom pyaspect.project import *\nfrom pyaspect.model.gridmod3d import gridmod3d as gm\nfrom pyaspect.model.bbox import bbox as bb\nfrom pyaspect.model.gm3d_utils import *\nfrom pyaspect.moment_tensor import MomentTensor\nfrom pyaspect.specfemio.headers import *\nfrom pyaspect.specfemio.write import *\nfrom pyaspect.specfemio.write import _write_header\nfrom pyaspect.specfemio.read import *\nfrom pyaspect.specfemio.utils import *\n\n\nimport pyaspect.events.gevents as gevents\nimport pyaspect.events.gstations as gstations\nfrom pyaspect.events.munge.knmi import correct_station_depths as csd_f\nimport pyaspect.events.mtensors as mtensors\nfrom obspy.imaging.beachball import beach\nfrom obspy import UTCDateTime\nimport shapefile as sf\n\nfrom pyrocko.moment_tensor import MomentTensor as RockoMT",
"_____no_output_____"
]
],
[
[
"### Step 1 \n\nExtract the ndarray of the subsampled, smoothed NAM model and instantiate a new GriddedModel3D object for QC'ing",
"_____no_output_____"
]
],
[
[
"data_in_dir = 'data/output/'\ndata_out_dir = data_in_dir\n!ls {data_in_dir}\n!ls data/groningen",
"_____no_output_____"
]
],
[
[
"### Step 6 \n\nDecompress the ndarray of the sliced, subsampled, smoothed NAM model and instantiate a new GriddedModel3D object for QC'ing",
"_____no_output_____"
]
],
[
[
"# set filename then used it to decompress model\nifqn = f'{data_out_dir}/vsliced_subsmp_smth_nam_2017_vp_vs_rho_Q_model_dx100_dy100_dz100_maxdepth5850_sig250.npz'\nvslice_gm3d, other_pars = decompress_gm3d_from_file(ifqn)\n\nprint()\nprint('decompressed gridded model\\n:',vslice_gm3d) \nprint()\nprint('other parameters:\\n',other_pars)\nprint()\n\n# WARNING: this will unpack all other_pars, if you overwrite a variable of the samename as val(key), then you \n# may not notice, and this may cause large headaches. I use it because I am aware of it.\n'''\nfor key in other_pars:\n locals()[key] = other_pars[key] #this is more advanced python than I think is reasonable for most \nsig_meters = sig\n''';\n\n# another way to get these varibles is just use the accessor functions for the gridmod3d. We need them later.\nxmin = other_pars['xmin']\ndx = other_pars['dx']\nnx = other_pars['nx']\nymin = other_pars['ymin']\ndy = other_pars['dy']\nny = other_pars['ny']\nzmin = other_pars['zmin']\ndz = other_pars['dz']\nnz = other_pars['nz']\nsig_meters = other_pars['sig'] # this variable is used later\nprint('sig_meters:',sig_meters)",
"_____no_output_____"
],
[
"# Create the spatial reference\ngrid = pv.UniformGrid()\n\n# Set the grid dimensions: shape + 1 because we want to inject our values on\n# the CELL data\nnam_dims = list(vslice_gm3d.get_npoints())\nnam_origin = [0,0,-vslice_gm3d.get_gorigin()[2]]\n#nam_origin = list(vslice_gm3d.get_gorigin())\n#nam_origin[2] *= -1\nnam_origin = tuple(nam_origin)\nnam_spacing = list(vslice_gm3d.get_deltas())\nnam_spacing[2] *=-1\nnam_spacing = tuple(nam_spacing)\nprint('nam_dims:',nam_dims)\nprint('nam_origin:',nam_origin)\nprint('nam_spacing:',nam_spacing)\n\n# Edit the spatial reference\ngrid.dimensions = np.array(nam_dims) + 1\ngrid.origin = nam_origin # The bottom left corner of the data set\ngrid.spacing = nam_spacing # These are the cell sizes along each axis\nnam_pvalues = vslice_gm3d.getNPArray()[0]\nprint('pvalues.shape:',nam_pvalues.shape)\n\n# Add the data values to the cell data\ngrid.cell_arrays[\"values\"] = nam_pvalues.flatten(order=\"F\") # Flatten the array!\n\n# Now plot the grid!\ncmap = plt.cm.jet\n#grid.plot(show_edges=True,cmap=cmap)\ngrid.plot(cmap=cmap,opacity=1.0)\n",
"_____no_output_____"
],
[
"slices = grid.slice_orthogonal()\n\n#slices.plot(show_edges=True,cmap=cmap)\nslices.plot(cmap=cmap)",
"_____no_output_____"
]
],
[
[
"## create virtual recievers (CMT solutions in forwards sense)",
"_____no_output_____"
]
],
[
[
"#coords = vslice_gm3d.getGlobalCoordsPointsXYZ()\ncoords = vslice_gm3d.getLocalCoordsPointsXYZ()\ncoords[:,2] = -coords[:,2]\n\nxc = np.unique(coords.T[0,:])\nyc = np.unique(coords.T[1,:])\nzc = np.unique(coords.T[2,:])\n\n\n#n_rand_p = 1000\n\nn_rand_p = 3\nnp.random.seed(n_rand_p) #nothing special about using n_rand_p just want reproducible random\n\n#stay away from the edges of the model for derivatives \n# and to avoid boundary effects\nxy_pad = 500 \n\nlrx = np.min(xc) + xy_pad\nlry = np.min(yc) + xy_pad\nlrz = -3400.0\n\nhrx = np.max(xc) - xy_pad\nhry = np.max(yc) - xy_pad\nhrz = -2600.0\n\nsrx = hrx - lrx\nsry = hry - lry\nsrz = hrz - lrz\n\n \nvrec_cmt_xyz = np.array([lrx + 0.33*srx,lry + 0.33*sry,-3000],dtype=np.float32).reshape((1,3))\n \n\nprint('cmt_xyz:\\n',vrec_cmt_xyz)\n",
"_____no_output_____"
],
[
"pv_rpoints = pv.wrap(vrec_cmt_xyz)\np = pv.Plotter()\nslices = grid.slice_orthogonal()\n#p.add_mesh(slices,cmap=cmap,opacity=0.50)\n#p.add_mesh(slices,cmap=cmap,opacity=1)\np.add_mesh(grid,cmap=cmap,opacity=0.50)\np.add_mesh(pv_rpoints, render_points_as_spheres=True, point_size=5,opacity=1.0)\n\np.show()",
"_____no_output_____"
]
],
[
[
"## Make Moment Tensors and CMTSolutionHeaders for each tensor",
"_____no_output_____"
]
],
[
[
"def CMTtoM0(CMTsol):\n A = np.array(([CMTsol[0],CMTsol[3],CMTsol[4]],\n [CMTsol[3],CMTsol[1],CMTsol[5]],\n [CMTsol[4],CMTsol[5],CMTsol[2]]))\n M0 = ((1/np.sqrt(2))*np.sqrt(np.sum(A*A)))\n \n return(M0)\n\ndef aki_from_sdr(strike,dip,rake,M0):\n from math import sin,cos\n \n print('input M0:',M0)\n \"\"\"\n converts given strike/dip/rake to moment tensor\n \"\"\"\n S = strike\n D = dip\n R = rake\n\n # PI / 180 to convert degrees to radians\n d2r = 0.017453293\n\n print(\"Strike = %9.5f degrees\" % S)\n print(\"Dip = %9.5f degrees\" % D)\n print(\"Rake/Slip = %9.5f degrees\" % R)\n print(\"\")\n\n # convert to radians\n S *= d2r\n D *= d2r\n R *= d2r\n\n '''\n # Aki & Richards\n Mxx = -1.0 * ( sin(D) * cos(R) * sin (2*S) + sin(2*D) * sin(R) * sin(S)*sin(S) )\n Myy = ( sin(D) * cos(R) * sin (2*S) - sin(2*D) * sin(R) * cos(S)*cos(S) )\n Mzz = -1.0 * ( Mxx + Myy)\n Mxy = ( sin(D) * cos(R) * cos (2*S) + 0.5 * sin(2*D) * sin(R) * sin(2*S) )\n Mxz = -1.0 * ( cos(D) * cos(R) * cos (S) + cos(2*D) * sin(R) * sin(S) )\n Myz = -1.0 * ( cos(D) * cos(R) * sin (S) - cos(2*D) * sin(R) * cos(S) )\n ''';\n \n #Aki and Richards\n Mxx = -( np.sin(D)*np.cos(R)*np.sin(2*S) + np.sin(2*D)*np.sin(R)*(np.sin(S)**2) )\n Myy = ( np.sin(D)*np.cos(R)*np.sin(2*S) - np.sin(2*D)*np.sin(R)*(np.cos(S)**2) )\n Mzz = -( Mxx + Myy )\n Mxy = ( np.sin(D)*np.cos(R)*np.cos(2*S) + 0.5*np.sin(2*D)*np.sin(R)*np.sin(2*S) )\n Mxz = -( np.cos(D)*np.cos(R)*np.cos(S) + np.cos(2*D)*np.sin(R)*np.sin(S) )\n Myz = -( np.cos(D)*np.cos(R)*np.sin(S) - np.cos(2*D)*np.sin(R)*np.cos(S) )\n \n\n a_mt = np.array([Mxx,Myy,Mzz,Mxy,Mxz,Myz])\n a_mt *= M0\n \n # Harvard CMT\n Mtt = a_mt[0] #Mxx \n Mpp = a_mt[1] #Myy \n Mrr = a_mt[2] #Mzz \n Mtp = -1.0*a_mt[3] #Mxy\n Mrt = a_mt[4] #Mxz \n Mrp = -1.0*a_mt[5] #Myz\n \n h_mt = np.array([Mrr,Mtt,Mpp,Mrt,Mrp,Mtp])\n \n \n\n print(\"Aki&Richards1980: Mxx Myy Mzz Mxy Mxz Myz\")\n print(\"%9.5f %9.5f %9.5f %9.5f %9.5f %9.5f\\n\" %(tuple(a_mt)))\n print(\"M0:\",CMTtoM0(a_mt))\n print()\n print(\"Harvard: Mrr Mtt Mpp Mrt Mrp Mtp\")\n print(\"%9.5f %9.5f %9.5f %9.5f %9.5f %9.5f\\n\" %(tuple(h_mt)))\n print(\"M0:\",CMTtoM0(h_mt))\n print()\n \n return a_mt",
"_____no_output_____"
]
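,
[
"# Added sanity check (a sketch, not part of the original workflow): a\n# vertical strike-slip fault (strike=0, dip=90, rake=0) should reduce to\n# a pure Mxy couple, and CMTtoM0 should recover the input scalar moment.\nchk_mt = aki_from_sdr(0.0, 90.0, 0.0, 1.0)\nassert np.allclose(chk_mt, [0, 0, 0, 1, 0, 0], atol=1e-6)\nassert np.isclose(CMTtoM0(chk_mt), 1.0)\nprint('strike-slip sanity check passed:', chk_mt)",
"_____no_output_____"
]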
],
[
[
"# this is the path to the project dir on the cluster\nmy_proj_dir = '/scratch/seismology/tcullison/test_mesh/FWD_Batch_Src_Test'\n\nm0 = 1\nmW = (np.log10(m0)-9.1)/1.5\nprint(f'mW = {mW}')\n\n\n#(mnn, mee, mdd, mne, mnd, med, magnitude)\n#((mnn, mne, mnd), (mne, mee, med), (mnd, med, mdd))\n#Mnn = MomentTensor.from_values((0,0,0, 1, 0, 0, mW)) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMnn = MomentTensor.from_values(((0,0,0),(0,1,0),(0,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nrMnn = RockoMT.from_values(((0,0,0),(0,1,0),(0,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\n#rMnn = RockoMT(strike=45,dip=90,rake=180,scalar_moment=0.707107)\n#rMnn = RockoMT(strike=270,dip=45,rake=90,scalar_moment=5)\nprint(f'MyMT normal: {Mnn}\\n\\n')\nprint(f'PyRockoMT: {rMnn}\\n\\n')\nprint(f'pyrocko normal: {rMnn.m6()}\\n\\n')\nprint(f'MyMT Aki: {Mnn.aki_richards_m6()}\\n\\n')\nprint(f'pyrocko harvard:{rMnn.m6_up_south_east()}\\n\\n')\nprint(f'MyMT Aki: {Mnn.harvard_m6()}\\n\\n')\nprint(f'\\nsdr aki:{aki_from_sdr(270,45,90,5)}\\n')\n\nl_mt = [Mnn,rMnn]\n#l_mt = [Mne,Mnd,Med]\n\nfor mt in l_mt:\n print(f'mt: {mt}')\n #print(f'mt_aki:\\n{mt.aki_richards_m6()}')\n #print(f'mt_har:\\n{mt.harvard_m6()}')\n \nl_cmt_srcs = []\nfor i in range(len(r_xyz)):\n cmt_h = CMTSolutionHeader(date=datetime.datetime.now(),\n ename=f'Event-{str(i).zfill(4)}',\n tshift=0.0,\n hdur=0.0,\n lat_yc=r_xyz[i,1],\n lon_xc=r_xyz[i,0],\n depth=-r_xyz[i,2],\n mt=l_mt[i],\n eid=i,\n sid=0)\n l_cmt_srcs.append(cmt_h)\n \nprint()\nfor cmt in l_cmt_srcs:\n print(f'cmt:\\n{cmt}')\n \nassert False",
"_____no_output_____"
],
[
"# this is the path to the project dir on the cluster\nmy_proj_dir = '/scratch/seismology/tcullison/test_mesh/FWD_Batch_Src_Test'\n\nm0 = 1\nmW = (np.log10(m0)-9.1)/1.5\n\n\n#(mnn, mee, mdd, mne, mnd, med, magnitude)\n#((mnn, mne, mnd), (mne, mee, med), (mnd, med, mdd))\nh_mat_xy = np.array([[0,0,0],[0,0,-1],[0,-1,0]])\nh_mat_xz = np.array([[0,0,1],[0,0,0],[1,0,0]])\nh_mat_yz = np.array([[0,-1,0],[-1,0,0],[0,0,0]])\nMxy = MomentTensor(m_up_south_east=h_mat_xy) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMxz = MomentTensor(m_up_south_east=h_mat_xz) #[[0,0,0],[0,0,1],[0,1,0]] SPEC coord system\nMyz = MomentTensor(m_up_south_east=h_mat_yz) #[[0,0,1],[0,0,0],[1,0,0]] SPEC coord system\n#Mxy = MomentTensor.from_values(((0,1, 0),(1,0, 0),( 0, 0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\n#Mxz = MomentTensor.from_values(((0,0, 0),(0,0,-1),( 0,-1,0))) #[[0,0,0],[0,0,1],[0,1,0]] SPEC coord system\n#Myz = MomentTensor.from_values(((0,0,-1),(0,0, 0),(-1, 0,0))) #[[0,0,1],[0,0,0],[1,0,0]] SPEC coord system\nprint(f'Mxy: {Mxy}')\nprint(f'Mxy PyR: {Mxy.m6()}')\nprint(f'Mxy Aki: {Mxy.m6_east_north_up()}')\nprint(f'Mxy Har: {Mxy.m6_up_south_east()}\\n\\n')\nprint(f'Mxz: {Mxz}')\nprint(f'Mxz PyR: {Mxz.m6()}')\nprint(f'Mxz Aki: {Mxz.m6_east_north_up()}')\nprint(f'Mxz Har: {Mxz.m6_up_south_east()}\\n\\n')\nprint(f'Myz: {Myz}')\nprint(f'Myz PyR: {Myz.m6()}')\nprint(f'Myz Aki: {Myz.m6_east_north_up()}')\nprint(f'Myz Har: {Myz.m6_up_south_east()}\\n\\n')\n'''\nMnn = MomentTensor.from_values(((1,0,0),(0,0,0),(0,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMee = MomentTensor.from_values(((0,0,0),(0,1,0),(0,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMdd = MomentTensor.from_values(((0,0,0),(0,0,0),(0,0,1))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMne = MomentTensor.from_values(((0,1,0),(1,0,0),(0,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMnd = MomentTensor.from_values(((0,0,-1),(0,0,0),(-1,0,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMed = MomentTensor.from_values(((0,0,0),(0,0,1),(0,1,0))) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nprint(f'Mnn: {Mnn}')\nprint(f'Mnn PyR: {Mnn.m6()}')\nprint(f'Mnn Aki: {Mnn.m6_east_north_up()}')\nprint(f'Mnn Har: {Mnn.m6_up_south_east()}\\n\\n')\nprint(f'Mee: {Mee}')\nprint(f'Mee PyR: {Mee.m6()}')\nprint(f'Mee Aki: {Mee.m6_east_north_up()}')\nprint(f'Mee Har: {Mee.m6_up_south_east()}\\n\\n')\nprint(f'Mdd: {Mdd}')\nprint(f'Mdd PyR: {Mdd.m6()}')\nprint(f'Mdd Aki: {Mdd.m6_east_north_up()}')\nprint(f'Mdd Har: {Mdd.m6_up_south_east()}\\n\\n')\nprint(f'Mne: {Mne}')\nprint(f'Mne PyR: {Mne.m6()}')\nprint(f'Mne Aki: {Mne.m6_east_north_up()}')\nprint(f'Mne Har: {Mne.m6_up_south_east()}\\n\\n')\nprint(f'Mnd: {Mnd}')\nprint(f'Mnd PyR: {Mnd.m6()}')\nprint(f'Mnd Aki: {Mnd.m6_east_north_up()}')\nprint(f'Mnd Har: {Mnd.m6_up_south_east()}\\n\\n')\nprint(f'Med: {Med}')\nprint(f'Med PyR: {Med.m6()}')\nprint(f'Med Aki: {Med.m6_east_north_up()}')\nprint(f'Med Har: {Med.m6_up_south_east()}\\n\\n')\n''';\n\nl_mt = [Mnn,rMnn]\n#l_mt = [Mne,Mnd,Med]\n\nfor mt in l_mt:\n print(f'mt: {mt}')\n #print(f'mt_aki:\\n{mt.aki_richards_m6()}')\n #print(f'mt_har:\\n{mt.harvard_m6()}')\n \nl_cmt_srcs = []\nfor i in range(len(r_xyz)):\n cmt_h = CMTSolutionHeader(date=datetime.datetime.now(),\n ename=f'Event-{str(i).zfill(4)}',\n tshift=0.0,\n hdur=0.0,\n lat_yc=r_xyz[i,1],\n lon_xc=r_xyz[i,0],\n depth=-r_xyz[i,2],\n mt=l_mt[i],\n eid=i,\n sid=0)\n l_cmt_srcs.append(cmt_h)\n \nprint()\nfor cmt in l_cmt_srcs:\n print(f'cmt:\\n{cmt}')\n \nassert False",
"_____no_output_____"
]
],
[
[
"# this is the path to the project dir on the cluster\nmy_proj_dir = '/scratch/seismology/tcullison/test_mesh/FWD_Batch_Src_Test'\n\nm0 = 1\nmW = (np.log10(m0)-9.1)/1.5\n\n\n#(mnn, mee, mdd, mne, mnd, med, magnitude)\n#((mnn, mne, mnd), (mne, mee, med), (mnd, med, mdd))\nh_mat_xy = np.array([[0, 0,0],[ 0,0,-1],[0,-1,0]])\nh_mat_xz = np.array([[0, 0,1],[ 0,0, 0],[1, 0,0]])\nh_mat_yz = np.array([[0,-1,0],[-1,0, 0],[0, 0,0]])\n#h_mat_111 = np.array([[1, 0,0],[ 0,1, 0],[0, 0,1]])\nh_mat_111 = np.array([[0, 0,0],[ 0,1, 0],[0, 0,0]])\nh_mat_123 = np.array([[1, 0,0],[ 0,2, 0],[0, 0,3]])\nh_mat_231 = np.array([[2, 0,0],[ 0,3, 0],[0, 0,1]])\nh_mat_312 = np.array([[3, 0,0],[ 0,1, 0],[0, 0,2]])\nMxy = MomentTensor(m_up_south_east=h_mat_xy) #[[0,1,0],[1,0,0],[0,0,0]] SPEC coord system\nMxz = MomentTensor(m_up_south_east=h_mat_xz) #[[0,0,0],[0,0,1],[0,1,0]] SPEC coord system\nMyz = MomentTensor(m_up_south_east=h_mat_yz) #[[0,0,1],[0,0,0],[1,0,0]] SPEC coord system\nM111 = MomentTensor(m_up_south_east=h_mat_111) #[[1,0,0],[0,1,0],[0,0,1]] SPEC coord system\nM123 = MomentTensor(m_up_south_east=h_mat_123) #[[1,0,0],[0,2,0],[0,0,3]] SPEC coord system\nM231 = MomentTensor(m_up_south_east=h_mat_231) #[[2,0,0],[0,3,0],[0,0,1]] SPEC coord system\nM312 = MomentTensor(m_up_south_east=h_mat_312) #[[3,0,0],[0,1,0],[0,0,2]] SPEC coord system\nprint(f'Mxy: {Mxy}')\nprint(f'Mxy PyR: {Mxy.m6()}')\nprint(f'Mxy Har: {Mxy.m6_up_south_east()}\\n\\n')\nprint(f'Mxz: {Mxz}')\nprint(f'Mxz PyR: {Mxz.m6()}')\nprint(f'Mxz Har: {Mxz.m6_up_south_east()}\\n\\n')\nprint(f'Myz: {Myz}')\nprint(f'Myz PyR: {Myz.m6()}')\nprint(f'Myz Har: {Myz.m6_up_south_east()}\\n\\n')\nprint(f'M111: {M111}')\nprint(f'M111 PyR: {M111.m6()}')\nprint(f'M111 Har: {M111.m6_up_south_east()}\\n\\n')\nprint(f'M123: {M123}')\nprint(f'M123 PyR: {M123.m6()}')\nprint(f'M123 Har: {M123.m6_up_south_east()}\\n\\n')\nprint(f'M231: {M231}')\nprint(f'M231 PyR: {M231.m6()}')\nprint(f'M231 Har: {M231.m6_up_south_east()}\\n\\n')\nprint(f'M312: {M312}')\nprint(f'M312 PyR: {M312.m6()}')\nprint(f'M312 Har: {M312.m6_up_south_east()}\\n\\n')\n\nl_mt = [('Harvard-XY',Mxy),('Harvard-XZ',Mxz),('Harvard-YZ',Myz),\n ('Harvard-111',M111),('Harvard-123',M123),('Harvard-231',M231),\n ('Harvard-312',M312)]\n\nfor mt in l_mt:\n print(f'mt: {mt}')\n \nl_cmt_srcs = []\nfor i in range(len(l_mt)):\n cmt_h = CMTSolutionHeader(date=datetime.datetime.now(),\n ename=l_mt[i][0],\n #ename=f'Event-{str(i).zfill(4)}',\n tshift=0.0,\n hdur=0.0,\n lat_yc=vrec_cmt_xyz[0,1],\n lon_xc=vrec_cmt_xyz[0,0],\n depth=-vrec_cmt_xyz[0,2],\n mt=l_mt[i][1],\n #mt=l_mt[i],\n eid=i,\n sid=0)\n l_cmt_srcs.append(cmt_h)\n \nprint()\nfor cmt in l_cmt_srcs:\n print(f'cmt:\\n{cmt}')\n \n#assert False",
"_____no_output_____"
]
],
[
[
"## Make Corresponding \"Virtual\" Recievers (including cross membors for derivatives) for the CMT's",
"_____no_output_____"
]
],
[
[
"m_delta = 25.0 # distance between cross stations for derivatives\nassert m_delta < xy_pad #see cells above this is padding\n#l_grp_vrecs = make_grouped_half_cross_reciprocal_station_headers_from_cmt_list(l_cmt_srcs,m_delta)\nl_grp_vrecs = make_grouped_cross_reciprocal_station_headers_from_cmt_list(l_cmt_srcs,m_delta)\n\nig = 0\nfor grp in l_grp_vrecs:\n print(f'***** Group: {ig} *****\\n')\n ir = 0\n for gvrec in grp:\n print(f'*** vrec: {ir} ***\\n{gvrec}')\n ir += 1\n ig += 1\n\nprint(len(flatten_grouped_headers(l_grp_vrecs)))\n ",
"_____no_output_____"
]
],
[
[
"## Plot Virtual Receiver Groups",
"_____no_output_____"
]
],
[
[
"all_g_xyz = get_xyz_coords_from_station_list(flatten_grouped_headers(l_grp_vrecs))\nall_g_xyz[:,2] *= -1 #pyview z-up positive and oposize sign of standard geophysics \npv_all_points = pv.wrap(all_g_xyz)\np = pv.Plotter()\np.add_mesh(grid,cmap=cmap,opacity=0.5)\n#p.add_mesh(slices,cmap=cmap,opacity=1.0)\np.add_mesh(pv_all_points, render_points_as_spheres=True, point_size=5,opacity=1.0)\np.show()",
"_____no_output_____"
]
],
[
[
"## Make real-receivers/virtual-sources",
"_____no_output_____"
]
],
[
[
"h = 3000\nrec_z = -200\nvsrc_rec_xyz = np.zeros((9,3))\n\nfor i in range(vsrc_rec_xyz.shape[0]):\n vsrc_rec_xyz[i,:] = vrec_cmt_xyz[0,:]\n vsrc_rec_xyz[i,2] = rec_z\n \n# x-h, y-y\nvsrc_rec_xyz[0,0] = vrec_cmt_xyz[0,0] - h \nvsrc_rec_xyz[0,1] = vrec_cmt_xyz[0,1] - h \n\n# x, y-y\nvsrc_rec_xyz[1,1] = vrec_cmt_xyz[0,1] - h \n \n# x+h, y-y\nvsrc_rec_xyz[2,0] = vrec_cmt_xyz[0,0] + h \nvsrc_rec_xyz[2,1] = vrec_cmt_xyz[0,1] - h \n \n# x-h, y\nvsrc_rec_xyz[3,0] = vrec_cmt_xyz[0,0] - h \n\n# x, y\n#do nothing but skip to next index below\n\n# x+h, y\nvsrc_rec_xyz[5,0] = vrec_cmt_xyz[0,0] + h \n\n# x-h, y+y\nvsrc_rec_xyz[6,0] = vrec_cmt_xyz[0,0] - h \nvsrc_rec_xyz[6,1] = vrec_cmt_xyz[0,1] + h \n\n# x, y+y\nvsrc_rec_xyz[7,1] = vrec_cmt_xyz[0,1] + h \n\n# x+h, y+y\nvsrc_rec_xyz[8,0] = vrec_cmt_xyz[0,0] + h \nvsrc_rec_xyz[8,1] = vrec_cmt_xyz[0,1] + h \n\n",
"_____no_output_____"
]
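,
[
"# Equivalent construction as a sketch: the nine receiver positions form a\n# 3x3 horizontal stencil of spacing h centered on the virtual CMT (index 4\n# is the center). Uses vrec_cmt_xyz, h and rec_z from the cell above.\nstencil = np.array([(dx, dy) for dy in (-h, 0, h) for dx in (-h, 0, h)])\nalt_xyz = np.column_stack([vrec_cmt_xyz[0,0] + stencil[:,0],\n                           vrec_cmt_xyz[0,1] + stencil[:,1],\n                           np.full(9, rec_z)])\nassert np.allclose(alt_xyz, vsrc_rec_xyz)\nprint('stencil layout matches the hand-built vsrc_rec_xyz')",
"_____no_output_____"
]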
],
[
[
"## Plot virtual sources (red) with virtual receivers (white)",
"_____no_output_____"
]
],
[
[
"pv_spoints = pv.wrap(vsrc_rec_xyz)\np = pv.Plotter()\n#p.add_mesh(slices,cmap=cmap,opacity=0.50)\np.add_mesh(grid,cmap=cmap,opacity=0.3)\np.add_mesh(pv_spoints, render_points_as_spheres=True, point_size=8,opacity=1,color='red')\n#p.add_mesh(pv_rpoints, render_points_as_spheres=True, point_size=5,opacity=0.5)\np.add_mesh(all_g_xyz, render_points_as_spheres=True, point_size=5,opacity=0.5)\np.show()",
"_____no_output_____"
]
],
[
[
"## Make StationHeaders (real recievers/virtual sources) ",
"_____no_output_____"
]
],
[
[
"l_real_recs = []\nfor i in range(len(vsrc_rec_xyz)):\n \n tr_bname = 'tr'\n new_r = StationHeader(name=tr_bname,\n network='NL', #FIXME\n lon_xc=vsrc_rec_xyz[i,0],\n lat_yc=vsrc_rec_xyz[i,1],\n depth=-vsrc_rec_xyz[i,2], #specfem z-down is positive\n elevation=0.0,\n trid=i)\n l_real_recs.append(new_r)\n \nfor rec in l_real_recs:\n print(rec)\n",
"_____no_output_____"
]
],
[
[
"## Make ForceSolutionHeaders for the above virtual sources (including force-triplets for calculation derivatives)",
"_____no_output_____"
]
],
[
[
"l_grp_vsrcs = make_grouped_reciprocal_force_solution_triplet_headers_from_rec_list(l_real_recs)",
"_____no_output_____"
]
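,
[
"# Quick size check (a sketch; relies on the helper above building one\n# force triplet per real receiver): there should be three virtual sources\n# for every real receiver.\nn_vsrcs = len(flatten_grouped_headers(l_grp_vsrcs))\nprint(n_vsrcs, len(l_real_recs))\nassert n_vsrcs == 3*len(l_real_recs)",
"_____no_output_____"
]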
],
[
[
"## Make replicates of each virtual receiver list: one for each force-triplet",
"_____no_output_____"
]
],
[
[
"l_grp_vrecs_by_vsrcs = make_replicated_reciprocal_station_headers_from_src_triplet_list(l_grp_vsrcs,\n l_grp_vrecs)",
"_____no_output_____"
]
],
[
[
"## Plot virtual sources (red) and virtual receivers (white) FROM headers",
"_____no_output_____"
]
],
[
[
"grp_s_xyz = get_unique_xyz_coords_from_solution_list(flatten_grouped_headers(l_grp_vsrcs))\ngrp_s_xyz[:,2] *= -1 #pyvista z-up is positive\n\nflat_recs = flatten_grouped_headers(flatten_grouped_headers(l_grp_vrecs_by_vsrcs))\ngrp_r_xyz = get_unique_xyz_coords_from_station_list(flat_recs)\ngrp_r_xyz[:,2] *= -1 #pyvista z-up is positive\n\nprint(len(grp_s_xyz))\nprint(len(grp_r_xyz))\n\npv_spoints = pv.wrap(grp_s_xyz)\npv_rpoints = pv.wrap(grp_r_xyz)\n\np = pv.Plotter()\np.add_mesh(slices,cmap=cmap,opacity=0.50)\np.add_mesh(grid,cmap=cmap,opacity=0.3)\np.add_mesh(pv_spoints, render_points_as_spheres=True, point_size=8,opacity=1,color='red')\np.add_mesh(pv_rpoints, render_points_as_spheres=True, point_size=5,opacity=0.5)\np.show()",
"_____no_output_____"
]
],
[
[
"## Make replicates of each \"real\" receiver list: for each CMT source",
"_____no_output_____"
]
],
[
[
"l_grp_recs_by_srcs = make_replicated_station_headers_from_src_list(l_cmt_srcs,l_real_recs)\n\n\nfor i in range(len(l_cmt_srcs)):\n print(f'***** SRC Records for Source: {i} *****\\n')\n for j in range(len(l_real_recs)):\n print(f'*** REC Header for Receiver: {j} ***\\n{l_grp_recs_by_srcs[i][j]}')\n ",
"_____no_output_____"
]
],
[
[
"## Plot \"real\" sources (red) and virtual receivers (white) FROM headers",
"_____no_output_____"
]
],
[
[
"grp_s_xyz = get_unique_xyz_coords_from_solution_list(l_cmt_srcs)\ngrp_s_xyz[:,2] *= -1 #pyvista z-up is positive\n\nflat_recs = flatten_grouped_headers(l_grp_recs_by_srcs) #real!\ngrp_r_xyz = get_unique_xyz_coords_from_station_list(flat_recs)\ngrp_r_xyz[:,2] *= -1 #pyvista z-up is positive\n\nprint(len(grp_s_xyz))\nprint(len(grp_r_xyz))\n\npv_spoints = pv.wrap(grp_s_xyz)\npv_rpoints = pv.wrap(grp_r_xyz)\n\np = pv.Plotter()\np.add_mesh(slices,cmap=cmap,opacity=0.50)\np.add_mesh(grid,cmap=cmap,opacity=0.3)\np.add_mesh(pv_spoints, render_points_as_spheres=True, point_size=12,opacity=1,color='red')\np.add_mesh(pv_rpoints, render_points_as_spheres=True, point_size=8,opacity=0.5)\np.show()\n\n#assert False",
"_____no_output_____"
]
],
[
[
"## Make reciprical RecordHeader",
"_____no_output_____"
]
],
[
[
"l_flat_vsrcs = flatten_grouped_headers(l_grp_vsrcs)\nl_flat_vrecs = flatten_grouped_headers(flatten_grouped_headers(l_grp_vrecs_by_vsrcs))\n\nvrecord_h = RecordHeader(name='Reciprocal-Record',solutions_h=l_flat_vsrcs,stations_h=l_flat_vrecs)\nprint(vrecord_h)\n\n# save the header to disc\nvrec_fqp = os.path.join(data_out_dir,'simple_record_h')\n_write_header(vrec_fqp,vrecord_h)\n\n#verify file is there\n!ls -l {vrec_fqp}",
"_____no_output_____"
]
],
[
[
"## Make reciprocal project",
"_____no_output_____"
]
],
[
[
"test_proj_name = 'ReciprocalGeometricTestProject'\ntest_proj_root_fqp = os.path.join(data_out_dir, 'tmp/TestProjects/NewMKProj')\ntest_parfile_fqp = os.path.join(data_out_dir, 'Par_file')\ntest_mesh_fqp = '/scratch/seismology/tcullison/test_mesh/MESH-default_batch_force_src'\ntest_spec_fqp = '/quanta1/home/tcullison/DevGPU_specfem3d'\ntest_pyutils_fqp = '/quanta1/home/tcullison/myscripts/python/specfem/pyutils'\ntest_script_fqp = '/quanta1/home/tcullison/myscripts/specfem'\n\n#copy the reciprocal record\ntest_proj_record_h = vrecord_h.copy()\n\nmake_fwd_project_dir(test_proj_name,\n test_proj_root_fqp,\n test_parfile_fqp,\n test_mesh_fqp,\n test_spec_fqp,\n test_pyutils_fqp,\n test_script_fqp,\n test_proj_record_h,\n copy_mesh=False,\n batch_srcs=False,\n verbose=True,\n max_event_rdirs=MAX_SPEC_SRC)\n #max_event_rdirs=)\n \n\nprint()\nprint('ls:')\n!ls {test_proj_root_fqp}\nprint('ls:')\n!ls {test_proj_root_fqp}/*/*\n",
"_____no_output_____"
]
],
[
[
"## Make Forward/Real RecordHeader",
"_____no_output_____"
]
],
[
[
"l_flat_srcs = l_cmt_srcs #NOTE: we don't need to flatten CMT list because they are not grouped\nl_flat_recs = flatten_grouped_headers(l_grp_recs_by_srcs) #Note: only one level of flattening\n\nrecord_h = RecordHeader(name='Forward-Record',solutions_h=l_flat_srcs,stations_h=l_flat_recs)\nprint(f'Forward Record:\\n{record_h}')\n\n# save the header to disc\nrec_fqp = os.path.join(data_out_dir,'real_simple_record_h')\n_write_header(rec_fqp,record_h)\n\n#verify file is there\n!ls -l {rec_fqp}\n\nprint('l_flat_srcs:',type(l_flat_srcs[0]))",
"_____no_output_____"
]
],
[
[
"## Make \"real\" project",
"_____no_output_____"
]
],
[
[
"test_real_proj_name = 'ForwardGeometricTestProject'\ntest_proj_root_fqp = os.path.join(data_out_dir, 'tmp/TestProjects/NewMKProj')\ntest_parfile_fqp = os.path.join(data_out_dir, 'Par_file')\ntest_mesh_fqp = '/scratch/seismology/tcullison/test_mesh/MESH-default_batch_force_src'\ntest_spec_fqp = '/quanta1/home/tcullison/DevGPU_specfem3d'\ntest_pyutils_fqp = '/quanta1/home/tcullison/myscripts/python/specfem/pyutils'\ntest_script_fqp = '/quanta1/home/tcullison/myscripts/specfem'\n\n#copy the forward/real record\ntest_real_proj_record_h = record_h.copy()\n\nmake_fwd_project_dir(test_real_proj_name,\n test_proj_root_fqp,\n test_parfile_fqp,\n test_mesh_fqp,\n test_spec_fqp,\n test_pyutils_fqp,\n test_script_fqp,\n test_real_proj_record_h,\n copy_mesh=False,\n batch_srcs=False,\n verbose=True,\n max_event_rdirs=MAX_SPEC_SRC)\n #max_event_rdirs=2)\n\n\nprint()\nprint('ls:')\n!ls {test_proj_root_fqp}\nprint('ls:')\n!ls {test_proj_root_fqp}/*/*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw",
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d440a9bb93636c754f92e44ab30b9fea11f34f | 30,098 | ipynb | Jupyter Notebook | notebooks/example_script.ipynb | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | notebooks/example_script.ipynb | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | notebooks/example_script.ipynb | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | 65.572985 | 1,996 | 0.657286 | [
[
[
"import torch.optim as optim\nimport torch.nn.functional as F\n\nimport bayescache.api as api\nfrom bayescache.data import P3B3\nfrom bayescache.models import mtcnn\nfrom bayescache.api.source import TrainingData\nfrom bayescache.callbacks.time_tracker import TimeTracker",
"_____no_output_____"
],
[
"hparams = mtcnn.Hyperparameters()\n# Update hyperparameters for the Synthetic data.\nhparams.vocab_size = 4014\nhparams.max_sent_len = 1500\n\nmodel = mtcnn.new(hparams)\n\noptimizer = optim.RMSprop(model.parameters(), lr=7.0e-4, eps=1e-3)",
"_____no_output_____"
],
[
"learner = api.Learner(device='cpu', model=model)\n#learner.summary()",
"_____no_output_____"
],
[
"train = P3B3(root='/home/ygx/data', partition='train', download=True)\ntest = P3B3(root='/home/ygx/data', partition='test', download=True)\n\nsource = TrainingData(train_source=train, val_source=test, num_workers=2, batch_size=4)\nsource.train_source",
"_____no_output_____"
],
[
"metrics = learner.metrics()\ncallbacks = [TimeTracker()]\n\ntraining_info = api.TrainingInfo(\n start_epoch_idx=0,\n run_name='test',\n metrics=metrics,\n callbacks=callbacks\n)",
"_____no_output_____"
],
[
"training_info.on_train_begin()\n",
"_____no_output_____"
],
[
"for global_epoch_idx in range(training_info.start_epoch_idx + 1, 1 + 1):\n epoch_info = api.EpochInfo(\n training_info=training_info,\n global_epoch_idx=global_epoch_idx,\n batches_per_epoch=source.train_iterations_per_epoch(),\n optimizer=optimizer\n )\n\n # Execute learning\n learner.train_epoch(epoch_info, source)",
"\nTraining: 0%| | 0/2000 [00:00<?, ?iter/s]\u001b[A"
]
],
[
[
"### Diagnosing Loss",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torch.utils.data import DataLoader",
"_____no_output_____"
],
[
"trainloader = DataLoader(train)",
"_____no_output_____"
],
[
"for idx, (data, label) in enumerate(trainloader):\n logits = model(data)\n losses = []\n print(label)\n for i in range(len(label[0])):\n print(i)\n loss = F.cross_entropy(logits[i], torch.tensor((label[:,i],)))\n losses.append(loss)\n #loss = sum(loss)\n if idx == 0:\n break ",
"tensor([[5, 0, 2, 2]])\n0\n1\n2\n"
],
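[
"# Added sketch: combine the per-task cross-entropy losses from the loop\n# above into a single multi-task loss by summation (the idea behind the\n# commented-out 'loss = sum(loss)' line).\ntotal_loss = sum(losses)\nprint('per-task losses:', [round(l.item(), 4) for l in losses])\nprint('total multi-task loss:', total_loss.item())",
"_____no_output_____"
],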
[
"logits",
"_____no_output_____"
],
[
"torch.tensor((label[0][0],))\nlabel[:,2]",
"_____no_output_____"
],
[
"F.cross_entropy(logits[1], torch.tensor((label[:,1],)))",
"_____no_output_____"
],
[
"logits",
"_____no_output_____"
],
[
"logits[0].shape\nx = label[0][0].item()\ntarget = torch.tensor((x,))\ntarget",
"_____no_output_____"
],
[
"input = torch.randn(1, 6, requires_grad=True)\ntarget = torch.randint(6, (1,), dtype=torch.int64)",
"_____no_output_____"
],
[
"input.shape\ntarget",
"_____no_output_____"
]
],
[
[
"### Shape of Things to Come:",
"_____no_output_____"
]
],
[
[
"data, labels = train.load_data()",
"_____no_output_____"
],
[
"len(labels)",
"_____no_output_____"
],
[
"len(labels[:,0])",
"_____no_output_____"
],
[
"import numpy as np\nunq, counts = np.unique(labels[:,0], return_counts=True)",
"_____no_output_____"
],
[
"unq",
"_____no_output_____"
],
[
"counts",
"_____no_output_____"
],
[
"data[0].shape",
"_____no_output_____"
],
[
"sum(counts)",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"labels",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d440c9981661a7ade04eaee80546eb4397ed77 | 51,647 | ipynb | Jupyter Notebook | pln/binarizacao_e_tf_idf.ipynb | IagorSs/roteiro-projetos | 176f44d7ca39d2e51d571dda11298da20ecf22ec | [
"CC-BY-4.0"
] | 8 | 2021-04-25T03:19:07.000Z | 2022-02-24T21:48:02.000Z | pln/binarizacao_e_tf_idf.ipynb | IagorSs/roteiro-projetos | 176f44d7ca39d2e51d571dda11298da20ecf22ec | [
"CC-BY-4.0"
] | null | null | null | pln/binarizacao_e_tf_idf.ipynb | IagorSs/roteiro-projetos | 176f44d7ca39d2e51d571dda11298da20ecf22ec | [
"CC-BY-4.0"
] | 7 | 2021-04-23T22:41:27.000Z | 2022-02-24T21:48:05.000Z | 37.076095 | 611 | 0.376943 | [
[
[
"# Representação numérica de palavras e textos",
"_____no_output_____"
],
[
"Neste notebook iremos apresentação formas de representar valores textuais por meio de representação numérica. Iremos usar pandas, caso queira entender um pouco sobre pandas, [veja este notebook](pandas.ipynb). Por isso, não esqueça de instalar o módulo pandas:\n\n``pip3 install pandas``\n\nEm aprendizado de máquina, muitas vezes, precisamos da representação numérica de um determinado valor. Por exemplo: ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf_jogos = pd.DataFrame([ [\"boa\",\"nublado\",\"não\"],\n [\"boa\",\"chuvoso\",\"não\"],\n [\"média\",\"nublado\",\"sim\"],\n [\"fraca\",\"chuvoso\",\"não\"]],\n columns=[\"disposição\",\"tempo\",\"jogar volei?\"])\ndf_jogos",
"_____no_output_____"
]
],
[
[
"Caso quisermos maperar cada coluna (agora chamada de atributo) para um valor, forma mais simples de se fazer a transformação é simplesmente mapear esse atributo para um valor numérico. Veja o exemplo abaixo: ",
"_____no_output_____"
],
[
"Nesse exemplo, temos dois atributos disposição do jogador e tempo e queremos prever se o jogar irá jogar volei ou não. Tanto os atributos quanto a classe podem ser mapeados como número. Além disso, o atributo `disposicao` é um atributo que representa uma escala - o que deixa essa forma de tranformação bem adequada para esse atributo.",
"_____no_output_____"
]
],
[
[
"from typing import Dict\ndef mapeia_atributo_para_int(df_data:pd.DataFrame, coluna:str, dic_nom_to_int: Dict[int,str]):\n for i,valor in enumerate(df_data[coluna]):\n valor_int = dic_nom_to_int[valor]\n df_data[coluna].iat[i] = valor_int\n\n \ndf_jogos = pd.DataFrame([ [\"boa\",\"nublado\",\"sim\"],\n [\"boa\",\"chuvoso\",\"não\"],\n [\"média\",\"ensolarado\",\"sim\"],\n [\"fraca\",\"chuvoso\",\"não\"]],\n columns=[\"disposição\",\"tempo\",\"jogar volei?\"])\ndic_disposicao = {\"boa\":3,\"média\":2,\"fraca\":1}\nmapeia_atributo_para_int(df_jogos, \"disposição\", dic_disposicao)\n\ndic_tempo = {\"ensolarado\":3,\"nublado\":2,\"chuvoso\":1}\nmapeia_atributo_para_int(df_jogos, \"tempo\", dic_tempo)\n\ndic_volei = {\"sim\":1, \"não\":0}\nmapeia_atributo_para_int(df_jogos, \"jogar volei?\", dic_volei)\ndf_jogos",
"_____no_output_____"
]
],
[
[
"## Binarização dos atributos categóricos",
"_____no_output_____"
],
[
"\nPodemos fazer a binarização dos atributos categóricos em que, cada valor de atributo transforma-se em uma coluna que recebe `0` caso esse atributo não exista e `1`, caso contrário. Em nosso exemplo: ",
"_____no_output_____"
]
],
[
[
"from preprocessamento_atributos import BagOfItems\ndf_jogos = pd.DataFrame([ [4, \"boa\",\"nublado\",\"sim\"],\n [3,\"boa\",\"chuvoso\",\"não\"],\n [2,\"média\",\"ensolarado\",\"sim\"],\n [1,\"fraca\",\"chuvoso\",\"não\"]],\n columns=[\"id\",\"disposição\",\"tempo\",\"jogar volei?\"])\ndic_disposicao = {\"boa\":3,\"média\":2,\"fraca\":1}\n\n\nbag_of_tempo = BagOfItems(0)\n#veja a implementação do método em preprocesamento_atributos.py\ndf_jogos_bot = bag_of_tempo.cria_bag_of_items(df_jogos,[\"tempo\"])\ndf_jogos_bot",
"0/4\n"
]
],
[
[
"Como existem vários valores no teste que você desconhece, se fizermos dessa forma, atributos que estão no teste poderiam estar completamente zerados no treino, sendo desnecessário, por exemplo: ",
"_____no_output_____"
]
],
[
[
"df_jogos_treino = df_jogos[:2]\ndf_jogos_treino",
"_____no_output_____"
],
[
"df_jogos_teste = df_jogos[2:]\ndf_jogos_teste",
"_____no_output_____"
]
],
[
[
"## Exemplo Real",
"_____no_output_____"
],
[
"Considere este exemplo real de filmes e seus atores ([obtidos no kaggle](https://www.kaggle.com/rounakbanik/the-movies-dataset)): ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ndf_amostra = pd.read_csv(\"movies_amostra.csv\")\ndf_amostra",
"_____no_output_____"
]
],
[
[
"Nesse exemplo, as colunas que representam os atores principais podem ser binarizadas. Em nosso caso, podemos colocar os atores todos em um \"Bag of Items\". Os atores são representados por as colunas `ator_1`, `ator_2`,..., `ator_5`. Abaixo, veja um sugestão de como fazer em dataset: ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom preprocessamento_atributos import BagOfItems\n\n\n\n\nobj_bag_of_actors = BagOfItems(min_occur=3)\n\n#boa=bag of actors ;)\ndf_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_amostra,[\"ator_1\",\"ator_2\",\"ator_3\",\"ator_4\",\"ator_5\"])",
"0/3000\n1000/3000\n2000/3000\n"
],
[
"df_amostra_boa",
"_____no_output_____"
]
],
[
[
"Veja que temos bastante atributos um para cada ator. Mesmo sendo melhor possuirmos poucos atributos e mais informativos, um método de aprendizado de máquina pode ser capaz de usar essa quantidade de forma eficaz. Particularmente, o [SVM linear](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) e o [RandomForest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) são métodos que conseguem ir bem nesse tipo de dado.",
"_____no_output_____"
],
[
"Essa é a forma mais prática de fazer, porém, em aprendizado de máquina, geralmente dividimos nossos dados em, pelo menos, treino e teste em que treino é o dado que você terá todo o acesso e, o teste, deve reproduzir uma amostra do mundo real. Vamos supor que no treino há atores raros que não ocorrem no teste, nesse caso tais atributos seriam inúteis para o teste. Isso pode fazer com que o resultado reproduza menos o mundo real - neste caso, é muito possível que a diferença seja quase insignificante. Mas, caso queiramos fazer da forma \"mais correta\", temos que considerar apenas o treino para isso:",
"_____no_output_____"
]
],
[
[
"#supondo que 80% da amostra é treino\ndf_treino_amostra = df_amostra.sample(frac=0.8, random_state = 2)\ndf_teste_amostra = df_amostra.drop(df_treino_amostra.index)\n\n#min_occur=3 definie o minimo de ocorrencias desse ator para ser considerado\n#pois, um ator que apareceu em poucos filmes, pode ser menos relevante para a predição do genero\nobj_bag_of_actors = BagOfItems(min_occur=3)\ndf_treino_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_treino_amostra,[\"ator_1\",\"ator_2\",\"ator_3\",\"ator_4\",\"ator_5\"])\ndf_teste_amostra_boa = obj_bag_of_actors.aplica_bag_of_items(df_teste_amostra,[\"ator_1\",\"ator_2\",\"ator_3\",\"ator_4\",\"ator_5\"])\n",
"0/2400\n1000/2400\n2000/2400\n0/600\n"
]
],
[
[
"## Representação Bag of Words",
"_____no_output_____"
],
[
"Muitas vezes, temos textos que podem ser relevantes para uma determinada tarefa de aprendizado d máquina. Por isso, temos que representar tais elementos para nosso método de aprendizado de máquina. \n\nA forma mais usual para isso, é a `Bag of Words` em que cada palavra é um atributo e, o valor dela, é a frequencia dele no texto (ou algum outro valor que indique a importancia dessa palavra no texto).\n\nPor exemplo, caso temos as frases `A casa é grande`, `A casa é verde verde` em que cada frase é uma instancia diferente. A representação seria da seguinte forma: ",
"_____no_output_____"
]
],
[
[
"dic_bow = {\"a\":[1,1],\n \"casa\":[1,1],\n \"é\":[1,1],\n \"verde\":[0,2]\n }\ndf_bow = pd.DataFrame.from_dict(dic_bow)\ndf_bow",
"_____no_output_____"
]
],
[
[
"Da forma que fizemos acima, usamos a frequencia de um termo para definir sua importancia no texto, porém, existem termos que possuem uma frequencia muito alta e importancia baixa: são os casos dos artigos e preposições por exemplo, pois, eles não discriminam o texto. \n\nUma forma de mensurar o porder discriminativo das palavras é usando a métrica `TF-IDF`. Para calcularmos essa métrica, primeiramente calculamos a frequencia de um termo no documento (TF) e, logo após multiplamos pelo IDF. \nA fórmula para calcular o TF-IDF do termo $i$ no documento (ou instancia) $j$ é a seguinte:\n\n\\begin{equation}\n TFIDF_{ij} = TF_{ij} \\times IDF_i\n\\end{equation}\n\\begin{equation}\n TF_{ij} = log(f_{ij})\n\\end{equation}\n\nem que $f_{ij}$ é a frequencia de um termo $i$ no documento $j$. Usa-se o `log` para suavizar valores muito altos e o $IDF$ (do inglês, _Inverse Document Frequency_) do termo $i$ é calculado da seguinte forma:\n\n\\begin{equation}\n IDF_i = log(\\frac{N}{n_i})\n\\end{equation}\n\nem que $N$ é o número de documentos da coleção e $n_i$ é o número de documentos em que esse termo $i$ ocorre. Espera-se que, quanto mais discriminativo o termo, em menos documentos esse termo irá ocorrer e, consequentemente, o $IDF$ deste termo será mais alto. \n\nPor exemplo, considere as palavras `de`, `bebida` e `cerveja`. `cerveja` é uma palavra mais discriminativa do que `bebida`; e `bebibda` é mais discriminativo do que a preposição `de`. Muito provavelmente teremos mais frequentemente termos menos discriminativos. Por exemplo, se tivermos uma coleção de 1000 documentos, `de` poderia ocorrer em 900 documentos, `bebida` em 500 e `cerveja` em 100 documentos. Se fizermos o calculo, veremos que quanto mais discriminativo um termo, mais alto é seu IDF:",
"_____no_output_____"
]
],
[
[
"import math\nN = 1000\nn_de = 900\nn_bebida = 500\nn_cerveja = 100\n\nIDF_de = math.log(N/n_de)\nIDF_bebida = math.log(N/n_bebida)\nIDF_cerveja = math.log(N/n_cerveja)\n\nprint(f\"IDF_de: {IDF_de}\\tIDF_bebida:{IDF_bebida}\\tIDF_cerveja:{IDF_cerveja}\")",
"_____no_output_____"
]
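,
[
"# Worked example (a sketch) applying the formulas above: the term\n# frequency is log-damped and then multiplied by the IDF. The count below\n# is an illustrative assumption, not data from the collection.\nf_cerveja = 3  # hypothetical frequency of 'cerveja' in one document\ntf = math.log(f_cerveja)\ntfidf = tf*IDF_cerveja\nprint(f'TF={tf:.3f} IDF={IDF_cerveja:.3f} TF-IDF={tfidf:.3f}')",
"_____no_output_____"
]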
],
[
[
"A biblioteca `scikitlearn`também já possui uma classe [TFIDFVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) que transforma um texto em um vetor de atributos usando o TF-IDF para o valor referente a relevancia deste termo. Veja um exemplo na coluna `resumo` do nosso dataset de filme:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom preprocessamento_atributos import BagOfWords\n\ndf_amostra = pd.read_csv(\"datasets/movies_amostra.csv\")\nbow_amostra = BagOfWords()\ndf_bow_amostra = bow_amostra.cria_bow(df_amostra,\"resumo\")\ndf_bow_amostra\n",
"_____no_output_____"
]
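,
[
"# Alternative sketch using scikit-learn's TfidfVectorizer directly,\n# instead of the BagOfWords helper class used above.\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nvectorizer = TfidfVectorizer()\nX_tfidf = vectorizer.fit_transform(df_amostra['resumo'].fillna(''))\nprint(X_tfidf.shape)",
"_____no_output_____"
]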
],
[
[
"Como são muitos atributos, pode parecer que não ficou corretamente gerado. Mas, filtrando as palavras de um determinado resumo você verificará que está ok:",
"_____no_output_____"
]
],
[
[
"df_bow_amostra[[\"in\",\"lake\", \"high\"]]",
"_____no_output_____"
]
],
[
[
"Não fique preso apenas nessas representações. Vocês podem tentar fazer representações mais sucintas, como, por exemplo: para preprocessar os dados da equipe do filme (atores, diretor e escritor), calcule o número de filmes de comédia que membros da equipe participaram e, logo após, o número de filme de ação. Neste caso, como você usará a classe, você deverá usar **apenas** os dados de treino. No caso do resumo, você pode utilizar palavras chaves. Por exemplo, faça uma lista de palavras chaves que remetem \"ação\" e contabilize o quantidade dessas palavras chaves no resumo.",
"_____no_output_____"
]
]
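,
[
[
"# Sketch of the keyword idea above: count action-related words in each\n# summary. The keyword list is an illustrative assumption.\nacao_palavras = {'war', 'fight', 'gun', 'chase', 'kill'}\ndef conta_palavras_acao(texto):\n    tokens = str(texto).lower().split()\n    return sum(tok.strip('.,!?') in acao_palavras for tok in tokens)\ndf_amostra['num_palavras_acao'] = df_amostra['resumo'].apply(conta_palavras_acao)\ndf_amostra[['resumo', 'num_palavras_acao']].head()",
"_____no_output_____"
]
]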
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d4459c5730c61017cd3a987fdb6b19a0e2038b | 83,738 | ipynb | Jupyter Notebook | Neural_Networks/classification/3_neural_net_NAI_MNIST_clothes.ipynb | dccstcc/NAI_PJATK_2020 | 293bc963b44aa42a50ec52c327983867c6c5a9ed | [
"MIT"
] | null | null | null | Neural_Networks/classification/3_neural_net_NAI_MNIST_clothes.ipynb | dccstcc/NAI_PJATK_2020 | 293bc963b44aa42a50ec52c327983867c6c5a9ed | [
"MIT"
] | null | null | null | Neural_Networks/classification/3_neural_net_NAI_MNIST_clothes.ipynb | dccstcc/NAI_PJATK_2020 | 293bc963b44aa42a50ec52c327983867c6c5a9ed | [
"MIT"
] | null | null | null | 83,738 | 83,738 | 0.881702 | [
[
[
"\"\"\"\r\n\r\nauthor: Dominik Stec,\r\n\r\nindex: s12623,\r\n\r\nemail: [email protected]\r\n\r\nTo run module:\r\n\r\nimport module into Google Colaboratory notebook and run.\r\n\r\nThis module recognize type of clothes according to given image of clothe.\r\n\r\nKeras model is build as classification type and contains two types of classification neural network architecture.\r\n\r\n\"\"\"",
"_____no_output_____"
],
[
"**First model**",
"_____no_output_____"
]
],
[
[
"%tensorflow_version 2.x\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.datasets.fashion_mnist import load_data\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout\r\nfrom keras.utils import to_categorical\r\nimport random\r\ntf.__version__",
"_____no_output_____"
],
[
"(X_train, y_train), (X_test, y_test) = load_data()",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\n"
],
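[
"# Optional preprocessing sketch: scaling pixel values to [0, 1] often helps\r\n# dense networks train more stably (the models below use the raw 0-255\r\n# inputs unchanged).\r\nX_train_norm = X_train/255.0\r\nX_test_norm = X_test/255.0\r\nprint(X_train_norm.min(), X_train_norm.max())",
"_____no_output_____"
],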
[
"print(X_train.shape)\r\nprint(y_train.shape)\r\nprint(X_test.shape)\r\nprint(y_test.shape)\r\nprint(y_test[:])\r\nprint(np.min(X_test[0]), np.max(X_test[0]))\r\n\r\ny_train_cat = to_categorical(y_train)\r\ny_test_cat = to_categorical(y_test)\r\n\r\nprint(y_train_cat.shape)\r\nprint(y_test_cat.shape)",
"(60000, 28, 28)\n(60000,)\n(10000, 28, 28)\n(10000,)\n[9 2 1 ... 8 1 5]\n0 255\n(60000, 10)\n(10000, 10)\n"
],
[
"class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\r\nplt.figure(figsize=(10,10))\r\nfor i in range(25):\r\n plt.subplot(5,5,i+1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n plt.imshow(X_train[i], cmap=plt.cm.binary)\r\n plt.xlabel(class_names[y_train[i]])\r\nplt.show()\r\n",
"_____no_output_____"
],
[
"model = Sequential()\r\nmodel.add(Flatten(input_shape=(28, 28)))\r\nmodel.add(Dense(units=256, activation='relu'))\r\nmodel.add(Dense(units=128, activation='relu'))\r\nmodel.add(Dense(units=64, activation='relu'))\r\nmodel.add(Dense(units=10, activation='softmax'))\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['categorical_accuracy'])\r\n\r\nmodel.summary()",
"Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_3 (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense_10 (Dense) (None, 256) 200960 \n_________________________________________________________________\ndense_11 (Dense) (None, 128) 32896 \n_________________________________________________________________\ndense_12 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndense_13 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 242,762\nTrainable params: 242,762\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.fit(X_train, y_train_cat, epochs=20, validation_data=(X_test, y_test_cat))",
"Epoch 1/20\n1875/1875 [==============================] - 7s 2ms/step - loss: 3.6016 - categorical_accuracy: 0.7180 - val_loss: 0.5244 - val_categorical_accuracy: 0.8240\nEpoch 2/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.4904 - categorical_accuracy: 0.8269 - val_loss: 0.4943 - val_categorical_accuracy: 0.8228\nEpoch 3/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4380 - categorical_accuracy: 0.8453 - val_loss: 0.4439 - val_categorical_accuracy: 0.8480\nEpoch 4/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.4025 - categorical_accuracy: 0.8525 - val_loss: 0.4354 - val_categorical_accuracy: 0.8474\nEpoch 5/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3818 - categorical_accuracy: 0.8604 - val_loss: 0.4188 - val_categorical_accuracy: 0.8577\nEpoch 6/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3709 - categorical_accuracy: 0.8651 - val_loss: 0.4029 - val_categorical_accuracy: 0.8585\nEpoch 7/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3493 - categorical_accuracy: 0.8744 - val_loss: 0.4279 - val_categorical_accuracy: 0.8552\nEpoch 8/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3400 - categorical_accuracy: 0.8788 - val_loss: 0.3946 - val_categorical_accuracy: 0.8700\nEpoch 9/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3286 - categorical_accuracy: 0.8795 - val_loss: 0.3913 - val_categorical_accuracy: 0.8658\nEpoch 10/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3133 - categorical_accuracy: 0.8887 - val_loss: 0.3937 - val_categorical_accuracy: 0.8676\nEpoch 11/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3110 - categorical_accuracy: 0.8874 - val_loss: 0.3766 - val_categorical_accuracy: 0.8692\nEpoch 12/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.3088 - categorical_accuracy: 0.8896 - val_loss: 0.4042 - val_categorical_accuracy: 0.8652\nEpoch 13/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2999 - categorical_accuracy: 0.8945 - val_loss: 0.3933 - val_categorical_accuracy: 0.8680\nEpoch 14/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2969 - categorical_accuracy: 0.8950 - val_loss: 0.4055 - val_categorical_accuracy: 0.8674\nEpoch 15/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2850 - categorical_accuracy: 0.8965 - val_loss: 0.4112 - val_categorical_accuracy: 0.8724\nEpoch 16/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2885 - categorical_accuracy: 0.8948 - val_loss: 0.3849 - val_categorical_accuracy: 0.8673\nEpoch 17/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2832 - categorical_accuracy: 0.8985 - val_loss: 0.4277 - val_categorical_accuracy: 0.8700\nEpoch 18/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2824 - categorical_accuracy: 0.8998 - val_loss: 0.4059 - val_categorical_accuracy: 0.8743\nEpoch 19/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2774 - categorical_accuracy: 0.9012 - val_loss: 0.4297 - val_categorical_accuracy: 0.8742\nEpoch 20/20\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.2751 - categorical_accuracy: 0.9004 - val_loss: 0.4217 - val_categorical_accuracy: 0.8623\n"
],
[
"i = random.randrange(0, len(y_test))\r\n\r\nprint('real value: ', class_names[y_test[i]])\r\nX_test_rs = X_test[i].reshape(1, 28, 28)\r\ncat = model.predict(X_test_rs)\r\ncat_idx = np.argmax(cat)\r\n\r\nplt.figure(figsize=(10,10))\r\nplt.subplot(5,5,1)\r\nplt.xticks([])\r\nplt.yticks([])\r\nplt.grid(False)\r\nplt.imshow(X_test[i], cmap=plt.cm.binary)\r\nplt.xlabel(class_names[y_test[i]])\r\nplt.show()\r\n\r\nprint('predict value: ', class_names[cat_idx])\r\n\r\n\r\n",
"real value: Sneaker\n"
]
],
[
[
"**Second model**",
"_____no_output_____"
]
],
[
[
"model = Sequential()\r\nmodel.add(Flatten(input_shape=(28, 28)))\r\nmodel.add(Dense(units=1024, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(units=512, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(units=512, activation='relu'))\r\n\r\nmodel.add(Dense(units=10, activation='softmax'))\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['categorical_accuracy'])\r\n\r\nmodel.summary()",
"Model: \"sequential_4\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten_4 (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense_14 (Dense) (None, 1024) 803840 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_15 (Dense) (None, 512) 524800 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_16 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndense_17 (Dense) (None, 10) 5130 \n=================================================================\nTotal params: 1,596,426\nTrainable params: 1,596,426\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.fit(X_train, y_train_cat, epochs=20, validation_data=(X_test, y_test_cat))",
"Epoch 1/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 7.5605 - categorical_accuracy: 0.6366 - val_loss: 0.7122 - val_categorical_accuracy: 0.7575\nEpoch 2/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.7541 - categorical_accuracy: 0.7387 - val_loss: 0.6959 - val_categorical_accuracy: 0.7571\nEpoch 3/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.6912 - categorical_accuracy: 0.7567 - val_loss: 0.5114 - val_categorical_accuracy: 0.8169\nEpoch 4/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.6096 - categorical_accuracy: 0.7864 - val_loss: 0.5084 - val_categorical_accuracy: 0.8228\nEpoch 5/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.5254 - categorical_accuracy: 0.8151 - val_loss: 0.4847 - val_categorical_accuracy: 0.8307\nEpoch 6/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.5055 - categorical_accuracy: 0.8227 - val_loss: 0.4388 - val_categorical_accuracy: 0.8494\nEpoch 7/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.4754 - categorical_accuracy: 0.8361 - val_loss: 0.4612 - val_categorical_accuracy: 0.8405\nEpoch 8/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.4744 - categorical_accuracy: 0.8365 - val_loss: 0.4287 - val_categorical_accuracy: 0.8449\nEpoch 9/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4654 - categorical_accuracy: 0.8381 - val_loss: 0.4099 - val_categorical_accuracy: 0.8600\nEpoch 10/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4632 - categorical_accuracy: 0.8395 - val_loss: 0.4279 - val_categorical_accuracy: 0.8556\nEpoch 11/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4509 - categorical_accuracy: 0.8469 - val_loss: 0.4234 - val_categorical_accuracy: 0.8583\nEpoch 12/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4524 - categorical_accuracy: 0.8447 - val_loss: 0.4250 - val_categorical_accuracy: 0.8590\nEpoch 13/20\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.4511 - categorical_accuracy: 0.8426 - val_loss: 0.4754 - val_categorical_accuracy: 0.8378\nEpoch 14/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4405 - categorical_accuracy: 0.8455 - val_loss: 0.4369 - val_categorical_accuracy: 0.8576\nEpoch 15/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4304 - categorical_accuracy: 0.8499 - val_loss: 0.4391 - val_categorical_accuracy: 0.8500\nEpoch 16/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4277 - categorical_accuracy: 0.8527 - val_loss: 0.4162 - val_categorical_accuracy: 0.8626\nEpoch 17/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4418 - categorical_accuracy: 0.8491 - val_loss: 0.4125 - val_categorical_accuracy: 0.8595\nEpoch 18/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4177 - categorical_accuracy: 0.8563 - val_loss: 0.4146 - val_categorical_accuracy: 0.8627\nEpoch 19/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4184 - categorical_accuracy: 0.8543 - val_loss: 0.4032 - val_categorical_accuracy: 0.8638\nEpoch 20/20\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.4127 - categorical_accuracy: 0.8593 - val_loss: 0.4257 - val_categorical_accuracy: 0.8604\n"
],
[
"i = random.randrange(0, len(y_test))\r\n\r\nprint('real value: ', class_names[y_test[i]])\r\nX_test_rs = X_test[i].reshape(1, 28, 28)\r\ncat = model.predict(X_test_rs)\r\ncat_idx = np.argmax(cat)\r\n\r\nplt.figure(figsize=(10,10))\r\nplt.subplot(5,5,1)\r\nplt.xticks([])\r\nplt.yticks([])\r\nplt.grid(False)\r\nplt.imshow(X_test[i], cmap=plt.cm.binary)\r\nplt.xlabel(class_names[y_test[i]])\r\nplt.show()\r\n\r\nprint('predict value: ', class_names[cat_idx])",
"real value: Trouser\n"
]
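,
[
"# Added sketch: report the overall test accuracy of the trained model,\r\n# complementing the single-image spot check above.\r\ntest_loss, test_acc = model.evaluate(X_test, y_test_cat, verbose=0)\r\nprint(f'test accuracy: {test_acc:.4f}')",
"_____no_output_____"
]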
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0d458919ac65d412b9b74adaa1271a8be741cd5 | 4,578 | ipynb | Jupyter Notebook | dev/34_callback_rnn.ipynb | anhquan0412/fastai_dev | aabbc18d496ef89de2063b9a1357d725b77e6f7d | [
"Apache-2.0"
] | 380 | 2019-07-10T17:23:35.000Z | 2022-03-21T10:30:55.000Z | dev/34_callback_rnn.ipynb | um3rella/fastai_dev | f2df87af028b5414a579d33cbfa42c1e6a04e4b9 | [
"Apache-2.0"
] | 162 | 2019-08-16T17:24:47.000Z | 2021-09-27T21:41:00.000Z | dev/34_callback_rnn.ipynb | um3rella/fastai_dev | f2df87af028b5414a579d33cbfa42c1e6a04e4b9 | [
"Apache-2.0"
] | 238 | 2019-07-13T13:38:43.000Z | 2022-03-06T01:45:00.000Z | 29.346154 | 128 | 0.579511 | [
[
[
"#export\nfrom fastai2.test import *\nfrom fastai2.data.all import *\nfrom fastai2.optimizer import *\nfrom fastai2.learner import *",
"_____no_output_____"
],
[
"from nbdev.showdoc import *",
"_____no_output_____"
],
[
"#default_exp callback.rnn",
"_____no_output_____"
]
],
[
[
"# Callback for RNN training\n\n> Callback that uses the outputs of language models to add AR and TAR regularization",
"_____no_output_____"
]
],
[
[
"#export\n@docs\nclass RNNTrainer(Callback):\n \"`Callback` that adds AR and TAR regularization in RNN training\"\n def __init__(self, alpha=0., beta=0.): self.alpha,self.beta = alpha,beta\n\n def begin_train(self): self.model.reset()\n def begin_validate(self): self.model.reset()\n def after_pred(self):\n self.raw_out,self.out = self.pred[1],self.pred[2]\n self.learn.pred = self.pred[0]\n\n def after_loss(self):\n if not self.training: return\n if self.alpha != 0.: self.learn.loss += self.alpha * self.out[-1].float().pow(2).mean()\n if self.beta != 0.:\n h = self.raw_out[-1]\n if len(h)>1: self.learn.loss += self.beta * (h[:,1:] - h[:,:-1]).float().pow(2).mean()\n\n _docs = dict(begin_train=\"Reset the model before training\",\n begin_validate=\"Reset the model before validation\",\n after_pred=\"Save the raw and dropped-out outputs and only keep the true output for loss computation\",\n after_loss=\"Add AR and TAR regularization\")",
"_____no_output_____"
]
],
[
[
"## Export -",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.export import notebook2script\nnotebook2script()",
"Converted 00_test.ipynb.\nConverted 01_core.ipynb.\nConverted 01a_dataloader.ipynb.\nConverted 01a_script.ipynb.\nConverted 02_transforms.ipynb.\nConverted 03_pipeline.ipynb.\nConverted 04_data_external.ipynb.\nConverted 05_data_core.ipynb.\nConverted 06_data_source.ipynb.\nConverted 07_vision_core.ipynb.\nConverted 08_pets_tutorial.ipynb.\nConverted 09_vision_augment.ipynb.\nConverted 09a_rect_augment.ipynb.\nConverted 10_data_block.ipynb.\nConverted 11_layers.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 14_callback_schedule.ipynb.\nConverted 15_callback_hook.ipynb.\nConverted 16_callback_progress.ipynb.\nConverted 17_callback_tracker.ipynb.\nConverted 18_callback_fp16.ipynb.\nConverted 19_callback_mixup.ipynb.\nConverted 20_metrics.ipynb.\nConverted 21_tutorial_imagenette.ipynb.\nConverted 30_text_core.ipynb.\nConverted 31_text_data.ipynb.\nConverted 32_text_models_awdlstm.ipynb.\nConverted 33_test_models_core.ipynb.\nConverted 34_callback_rnn.ipynb.\nConverted 35_tutorial_wikitext.ipynb.\nConverted 40_tabular_core.ipynb.\nConverted 60_vision_models_xresnet.ipynb.\nConverted 90_notebook_core.ipynb.\nConverted 91_notebook_export.ipynb.\nConverted 92_notebook_showdoc.ipynb.\nConverted 93_notebook_export2html.ipynb.\nConverted 94_index.ipynb.\nConverted 95_synth_learner.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d458969fc2f88bdf5008455137a7181c77fb4a | 69,819 | ipynb | Jupyter Notebook | wafer_by_technology.ipynb | willhyper/wafer-by-technology | ab668d2389adf7de3d1e0fbfc2f106b540ad2dad | [
"MIT"
] | null | null | null | wafer_by_technology.ipynb | willhyper/wafer-by-technology | ab668d2389adf7de3d1e0fbfc2f106b540ad2dad | [
"MIT"
] | null | null | null | wafer_by_technology.ipynb | willhyper/wafer-by-technology | ab668d2389adf7de3d1e0fbfc2f106b540ad2dad | [
"MIT"
] | null | null | null | 634.718182 | 66,954 | 0.942609 | [
[
[
"import pandas as pd\n\nxlsx_link = 'https://docs.google.com/spreadsheets/d/1ge20fpSWJgiuI_6Cn5KyBKnkLe4gVhW9GutD-Qf5z38/export'\ndf = pd.read_excel(xlsx_link)",
"_____no_output_____"
],
[
"# build tech_rev\n\nimport numpy as np\n\nquarters = df['Quarter']\nrevenue_total = df['Revenue (M NTD)'] / 1000 # in billion NTD\n\ntechnologies = ['5nm','7nm','10nm','16nm','20nm','28nm','40/45nm','65nm','90nm','0.11/0.13um','0.15/0.18um','0.25um and above']\n\ndef nan2zero(arr):\n return np.array([0.0 if np.isnan(v) else v for v in arr])\n\ndef revbytech(tech : str):\n share = df[tech] # percentage\n return nan2zero(share * revenue_total)\n\ntech_rev : dict = { tech : revbytech(tech) for tech in technologies}\n",
"_____no_output_____"
],
[
"xlabels = quarters\nlabel_bar : dict = tech_rev\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\n\nbottom = 0\nfor label, bar in label_bar.items():\n ax.bar(xlabels, bar, bottom=bottom, label=label)\n bottom += bar\n \nax.set_ylabel('Billion NTD')\nax.set_title('Revenue by Technology')\nax.legend()\n\nfig.set_dpi(150)\n\nplt.xticks(rotation=90)\nplt.grid()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
d0d45a001b4c9a3a3e7b1a5192df6bb6b1f708df | 353,674 | ipynb | Jupyter Notebook | P4-Training-a-Smart-Cab/smartcab.ipynb | yatingupta10/Machine-Learning-Nanodegree | bf98fdcb17c9a4dcb1cd66a79f54ece7d050f69f | [
"MIT"
] | 1 | 2021-12-03T12:50:28.000Z | 2021-12-03T12:50:28.000Z | P4-Training-a-Smart-Cab/smartcab.ipynb | yatingupta10/Machine-Learning-Nanodegree | bf98fdcb17c9a4dcb1cd66a79f54ece7d050f69f | [
"MIT"
] | null | null | null | P4-Training-a-Smart-Cab/smartcab.ipynb | yatingupta10/Machine-Learning-Nanodegree | bf98fdcb17c9a4dcb1cd66a79f54ece7d050f69f | [
"MIT"
] | null | null | null | 583.620462 | 140,098 | 0.925047 | [
[
[
"# Machine Learning Engineer Nanodegree\n## Reinforcement Learning\n## Project: Train a Smartcab to Drive\n\nWelcome to the fourth project of the Machine Learning Engineer Nanodegree! In this notebook, template code has already been provided for you to aid in your analysis of the *Smartcab* and your implemented learning algorithm. You will not need to modify the included code beyond what is requested. There will be questions that you must answer which relate to the project and the visualizations provided in the notebook. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide in `agent.py`. \n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.",
"_____no_output_____"
],
[
"-----\n\n## Getting Started\nIn this project, you will work towards constructing an optimized Q-Learning driving agent that will navigate a *Smartcab* through its environment towards a goal. Since the *Smartcab* is expected to drive passengers from one location to another, the driving agent will be evaluated on two very important metrics: **Safety** and **Reliability**. A driving agent that gets the *Smartcab* to its destination while running red lights or narrowly avoiding accidents would be considered **unsafe**. Similarly, a driving agent that frequently fails to reach the destination in time would be considered **unreliable**. Maximizing the driving agent's **safety** and **reliability** would ensure that *Smartcabs* have a permanent place in the transportation industry.\n\n**Safety** and **Reliability** are measured using a letter-grade system as follows:\n\n| Grade \t| Safety \t| Reliability \t|\n|:-----:\t|:------:\t|:-----------:\t|\n| A+ \t| Agent commits no traffic violations,<br/>and always chooses the correct action. | Agent reaches the destination in time<br />for 100% of trips. |\n| A \t| Agent commits few minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 90% of trips. |\n| B \t| Agent commits frequent minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 80% of trips. |\n| C \t| Agent commits at least one major traffic violation,<br/> such as driving through a red light. | Agent reaches the destination on time<br />for at least 70% of trips. |\n| D \t| Agent causes at least one minor accident,<br/> such as turning left on green with oncoming traffic. \t| Agent reaches the destination on time<br />for at least 60% of trips. |\n| F \t| Agent causes at least one major accident,<br />such as driving through a red light with cross-traffic. \t| Agent fails to reach the destination on time<br />for at least 60% of trips. |\n\nTo assist evaluating these important metrics, you will need to load visualization code that will be used later on in the project. Run the code cell below to import this code which is required for your analysis.",
"_____no_output_____"
]
],
[
[
"# Import the visualization code\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Understand the World\nBefore starting to work on implementing your driving agent, it's necessary to first understand the world (environment) which the *Smartcab* and driving agent work in. One of the major components to building a self-learning agent is understanding the characteristics about the agent, which includes how the agent operates. To begin, simply run the `agent.py` agent code exactly how it is -- no need to make any additions whatsoever. Let the resulting simulation run for some time to see the various working components. Note that in the visual simulation (if enabled), the **white vehicle** is the *Smartcab*.",
"_____no_output_____"
],
[
"### Question 1\nIn a few sentences, describe what you observe during the simulation when running the default `agent.py` agent code. Some things you could consider:\n- *Does the Smartcab move at all during the simulation?*\n- *What kind of rewards is the driving agent receiving?*\n- *How does the light changing color affect the rewards?* \n\n**Hint:** From the `/smartcab/` top-level directory (where this notebook is located), run the command \n```bash\n'python smartcab/agent.py'\n```",
"_____no_output_____"
],
[
"**Answer:** The smart cab does not move at all during the simulation.\n\nWhilst runnning the simulation, we see both the movement of other vehicles around the grid system, and the changing colour of the traffic, either red or green. The smartcab receives either a positive or negative reward depending on whether is took an appropriate action: negative if the wrong action, positive if correct. The magnitude of the reward increases for consecutive incorrect/correct actions i.e. the reward received will be greater if the smartcab continues to do the correct action, if the previous action was also correct.\n\nThe light colour determines the reward the smartcab receives for the current action taken. The smartcab receives a positive reward if it idles in front of red lights, and conversely receives a negative reward if it idles in front of green lights.\n\nIt is receiving reward when waiting on red light, or green light with oncoming traffic. There is also a penalty when idling on green light without traffic.\n\nLight color determines whether going or staying will give reward or penalty.",
"_____no_output_____"
],
[
"### Understand the Code\nIn addition to understanding the world, it is also necessary to understand the code itself that governs how the world, simulation, and so on operate. Attempting to create a driving agent would be difficult without having at least explored the *\"hidden\"* devices that make everything work. In the `/smartcab/` top-level directory, there are two folders: `/logs/` (which will be used later) and `/smartcab/`. Open the `/smartcab/` folder and explore each Python file included, then answer the following question.",
"_____no_output_____"
],
[
"### Question 2\n- *In the *`agent.py`* Python file, choose three flags that can be set and explain how they change the simulation.*\n- *In the *`environment.py`* Python file, what Environment class function is called when an agent performs an action?*\n- *In the *`simulator.py`* Python file, what is the difference between the *`'render_text()'`* function and the *`'render()'`* function?*\n- *In the *`planner.py`* Python file, will the *`'next_waypoint()`* function consider the North-South or East-West direction first?*",
"_____no_output_____"
],
[
"**Answer:** \n\nagent.py\n- update_delay determines time delay between actions with a default of 2 seconds\n- log_metrics Boolean toggle to determine whether to log trial and simulation results to /logs\n- optimized set default log file name\n\n\nenvironment.py\n- The class function act is called when the agent performs an action\n\n\nsimulator.py\n- render_text producing the logging viewed in the terminal, whereas render produces the logging viewed in the GUI simulation\n\n\nplanner.py\n- next_waypoint checks the East-West direction before checking the North-South direction",
"_____no_output_____"
],
[
"-----\n## Implement a Basic Driving Agent\n\nThe first step to creating an optimized Q-Learning driving agent is getting the agent to actually take valid actions. In this case, a valid action is one of `None`, (do nothing) `'Left'` (turn left), `'Right'` (turn right), or `'Forward'` (go forward). For your first implementation, navigate to the `'choose_action()'` agent function and make the driving agent randomly choose one of these actions. Note that you have access to several class variables that will help you write this functionality, such as `'self.learning'` and `'self.valid_actions'`. Once implemented, run the agent file and simulation briefly to confirm that your driving agent is taking a random action each time step.",
"_____no_output_____"
],
[
"### Basic Agent Simulation Results\nTo obtain results from the initial simulation, you will need to adjust following flags:\n- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.\n- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.\n- `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file in `/logs/`.\n- `'n_test'` - Set this to `'10'` to perform 10 testing trials.\n\nOptionally, you may disable to the visual simulation (which can make the trials go faster) by setting the `'display'` flag to `False`. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!\n\nOnce you have successfully completed the initial simulation (there should have been 20 training trials and 10 testing trials), run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!\nRun the agent.py file after setting the flags from projects/smartcab folder instead of projects/smartcab/smartcab.\n",
"_____no_output_____"
]
],
[
[
"# Load the 'sim_no-learning' log file from the initial simulation results\nvs.plot_trials('sim_no-learning.csv')",
"_____no_output_____"
]
],
[
[
"### Question 3\nUsing the visualization above that was produced from your initial simulation, provide an analysis and make several observations about the driving agent. Be sure that you are making at least one observation about each panel present in the visualization. Some things you could consider:\n- *How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?*\n- *Given that the agent is driving randomly, does the rate of reliabilty make sense?*\n- *What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?*\n- *As the number of trials increases, does the outcome of results change significantly?*\n- *Would this Smartcab be considered safe and/or reliable for its passengers? Why or why not?*",
"_____no_output_____"
],
[
"**Answer:** \n\n- From the \"10-trial rolling relative frequency of bad actions\" visualisation, the agent is making bad decisions approximately 44% percent of the time. Of those bad decisions, approximately 24% result in accidents\n- This reliability result makes some sense in light of the agent choosing actions randomly between four actions\n- The \"10-trial rolling average reward per action\" displays that on average the agent receives a reward between -7 and -5.5. This would indicate that the agent is being heavily penalised as it is performing a slight majority of good decisions, from the top left visualisation.\n- From the visualisation \"10-trial rolling rate of reliability\", the results are consistently 0% as no trail completed successfully\n- This smartcab should not be considered safe for its passengers, as both the safety rating and reliability rating are \"F\". This would indicate that the agent caused at least one major accident per run, and failed to reach the destination on time for at least 60% of trips",
"_____no_output_____"
],
[
"-----\n## Inform the Driving Agent\nThe second step to creating an optimized Q-learning driving agent is defining a set of states that the agent can occupy in the environment. Depending on the input, sensory data, and additional variables available to the driving agent, a set of states can be defined for the agent so that it can eventually *learn* what action it should take when occupying a state. The condition of `'if state then action'` for each state is called a **policy**, and is ultimately what the driving agent is expected to learn. Without defining states, the driving agent would never understand which action is most optimal -- or even what environmental variables and conditions it cares about!",
"_____no_output_____"
],
[
"### Identify States\nInspecting the `'build_state()'` agent function shows that the driving agent is given the following data from the environment:\n- `'waypoint'`, which is the direction the *Smartcab* should drive leading to the destination, relative to the *Smartcab*'s heading.\n- `'inputs'`, which is the sensor data from the *Smartcab*. It includes \n - `'light'`, the color of the light.\n - `'left'`, the intended direction of travel for a vehicle to the *Smartcab*'s left. Returns `None` if no vehicle is present.\n - `'right'`, the intended direction of travel for a vehicle to the *Smartcab*'s right. Returns `None` if no vehicle is present.\n - `'oncoming'`, the intended direction of travel for a vehicle across the intersection from the *Smartcab*. Returns `None` if no vehicle is present.\n- `'deadline'`, which is the number of actions remaining for the *Smartcab* to reach the destination before running out of time.",
"_____no_output_____"
],
[
"### Question 4\n*Which features available to the agent are most relevant for learning both **safety** and **efficiency**? Why are these features appropriate for modeling the *Smartcab* in the environment? If you did not choose some features, why are those features* not *appropriate?*",
"_____no_output_____"
],
[
"**Answer:**\n\nShould only need waypoint and inputs features, including lightm left, right, and oncoming attributes. Though debatable, it can be said that the deadline feature is not as relevant as either of the other features as it does not contain any information that cannot also be derived from the other features.\n\n\nThe inputs feature is relevant for safety, because it determines the constraints under which the smartcab can operate. For example, if there there is a car from the smartcab's left that will come across the intersection, the smartcab should respond appropriately. It does not capture any sense of efficiency as we do not know how this feature relates to the direction the smartcab should go under some constraint. On important qualification comes from the domain knowledge knowing that this agent relates to a road system where traffic travels on the right hand side of the road. It would be more efficient, but less generalisable, to drop knowledge of traffic to the right of the agent in our state description.\n\n\nThe waypoint feature captures efficiency. This indicates the ideal actions to follow to reach our destination. Furthermore, waypoint would produce the optimal path under the constraint of no lights or other vehicles. Unlike the deadline feature, this feature is a necessary requirement to undertand the smartcab's position in the environment.\n\n\nAlthough the deadline feature is a measure of efficiency, we can deduce a measure of efficiency from the waypoint feature, assuming that it will always indicate the optimal direction the smartcab should follow.",
"_____no_output_____"
],
[
"### Define a State Space\nWhen defining a set of states that the agent can occupy, it is necessary to consider the *size* of the state space. That is to say, if you expect the driving agent to learn a **policy** for each state, you would need to have an optimal action for *every* state the agent can occupy. If the number of all possible states is very large, it might be the case that the driving agent never learns what to do in some states, which can lead to uninformed decisions. For example, consider a case where the following features are used to define the state of the *Smartcab*:\n\n`('is_raining', 'is_foggy', 'is_red_light', 'turn_left', 'no_traffic', 'previous_turn_left', 'time_of_day')`.\n\nHow frequently would the agent occupy a state like `(False, True, True, True, False, False, '3AM')`? Without a near-infinite amount of time for training, it's doubtful the agent would ever learn the proper action!",
"_____no_output_____"
],
[
"### Question 5\n*If a state is defined using the features you've selected from **Question 4**, what would be the size of the state space? Given what you know about the evironment and how it is simulated, do you think the driving agent could learn a policy for each possible state within a reasonable number of training trials?* \n**Hint:** Consider the *combinations* of features to calculate the total number of states!",
"_____no_output_____"
],
[
"**Answer:**\n\nFrom question 4, I said that the required features were waypoint and inputs.\n\n\nWaypoint can be one of four values: right, forward, left - we can discount None from our state space as this would indicate that the smartcab has reached its destination\n\n\nInputs breaks down as:\n- light can be red or green\n- left, and oncoming can be either right, forward, left, or None. Can drop inputs['right'] feature as traffic to right of smartcab is travelling away\n\n\nThe total state space would be 3 \\* 4 \\* 4 \\* * 2 = 96\n\n\nI do not think it is reasonable to expect the agent to learn a policy for each possible state. This is because if we are to assume each journey is going to take a maximum of the order of 10 steps, we would realistically need an order of 10^3 trials to obtain some meaningful results for this state space. Or in other words, for anything less than 10^3 trials, we would get a total number or data points of a similar order of magnitude as the state space.\n",
"_____no_output_____"
],
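[
"The count above can be double-checked with a quick standard-library enumeration (a standalone sketch, independent of the project code):\n\n```python\nfrom itertools import product\n\nwaypoints = ['left', 'forward', 'right']\nlights = ['red', 'green']\ntraffic = ['left', 'forward', 'right', None]  # for inputs['left'] and inputs['oncoming']\n\nstates = list(product(waypoints, lights, traffic, traffic))\nprint(len(states))  # 3 * 2 * 4 * 4 = 96\n```",
"_____no_output_____"
],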
[
"### Update the Driving Agent State\nFor your second implementation, navigate to the `'build_state()'` agent function. With the justification you've provided in **Question 4**, you will now set the `'state'` variable to a tuple of all the features necessary for Q-Learning. Confirm your driving agent is updating its state by running the agent file and simulation briefly and note whether the state is displaying. If the visual simulation is used, confirm that the updated state corresponds with what is seen in the simulation.\n\n**Note:** Remember to reset simulation flags to their default setting when making this observation!",
"_____no_output_____"
],
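[
"As a concrete sketch of what the returned state could look like given the reasoning in **Question 4** (a suggestion only -- the template leaves the exact tuple up to you; `waypoint` and `inputs` are supplied to `build_state()` by the provided code):\n\n```python\nstate = (waypoint, inputs['light'], inputs['left'], inputs['oncoming'])\n```",
"_____no_output_____"
],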
[
"-----\n## Implement a Q-Learning Driving Agent\nThe third step to creating an optimized Q-Learning agent is to begin implementing the functionality of Q-Learning itself. The concept of Q-Learning is fairly straightforward: For every state the agent visits, create an entry in the Q-table for all state-action pairs available. Then, when the agent encounters a state and performs an action, update the Q-value associated with that state-action pair based on the reward received and the interative update rule implemented. Of course, additional benefits come from Q-Learning, such that we can have the agent choose the *best* action for each state based on the Q-values of each state-action pair possible. For this project, you will be implementing a *decaying,* $\\epsilon$*-greedy* Q-learning algorithm with *no* discount factor. Follow the implementation instructions under each **TODO** in the agent functions.\n\nNote that the agent attribute `self.Q` is a dictionary: This is how the Q-table will be formed. Each state will be a key of the `self.Q` dictionary, and each value will then be another dictionary that holds the *action* and *Q-value*. Here is an example:\n\n```\n{ 'state-1': { \n 'action-1' : Qvalue-1,\n 'action-2' : Qvalue-2,\n ...\n },\n 'state-2': {\n 'action-1' : Qvalue-1,\n ...\n },\n ...\n}\n```\n\nFurthermore, note that you are expected to use a *decaying* $\\epsilon$ *(exploration) factor*. Hence, as the number of trials increases, $\\epsilon$ should decrease towards 0. This is because the agent is expected to learn from its behavior and begin acting on its learned behavior. Additionally, The agent will be tested on what it has learned after $\\epsilon$ has passed a certain threshold (the default threshold is 0.01). For the initial Q-Learning implementation, you will be implementing a linear decaying function for $\\epsilon$.",
"_____no_output_____"
],
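[
"To make the pieces concrete, here is a minimal sketch of the exploration/exploitation choice and the discount-free update. Hedged: `self.Q`, `self.epsilon`, `self.alpha`, `self.learning`, and `self.valid_actions` are named by the project materials, but the method bodies below are illustrative rather than the template's required form, and they assume `state` already has an entry in `self.Q`:\n\n```python\nimport random\n\ndef choose_action(self, state):\n    # Explore with probability epsilon; otherwise exploit the best-known action.\n    if (not self.learning) or random.random() < self.epsilon:\n        return random.choice(self.valid_actions)\n    best_q = max(self.Q[state].values())\n    # Break ties randomly between equally good actions.\n    return random.choice([a for a, q in self.Q[state].items() if q == best_q])\n\ndef learn(self, state, action, reward):\n    # No discount factor: blend the observed reward into the old Q-value.\n    self.Q[state][action] = (1 - self.alpha) * self.Q[state][action] + self.alpha * reward\n```",
"_____no_output_____"
],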
[
"### Q-Learning Simulation Results\nTo obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:\n- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.\n- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.\n- `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.\n- `'n_test'` - Set this to `'10'` to perform 10 testing trials.\n- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.\n\nIn addition, use the following decay function for $\\epsilon$:\n\n$$ \\epsilon_{t+1} = \\epsilon_{t} - 0.05, \\hspace{10px}\\textrm{for trial number } t$$\n\nIf you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation! \n\nOnce you have successfully completed the initial Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!",
"_____no_output_____"
]
],
[
[
"# Load the 'sim_default-learning' file from the default Q-Learning simulation\nvs.plot_trials('sim_default-learning.csv')",
"_____no_output_____"
]
],
[
[
"### Question 6\nUsing the visualization above that was produced from your default Q-Learning simulation, provide an analysis and make observations about the driving agent like in **Question 3**. Note that the simulation should have also produced the Q-table in a text file which can help you make observations about the agent's learning. Some additional things you could consider: \n- *Are there any observations that are similar between the basic driving agent and the default Q-Learning agent?*\n- *Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?*\n- *Is the decaying function you implemented for $\\epsilon$ (the exploration factor) accurately represented in the parameters panel?*\n- *As the number of training trials increased, did the number of bad actions decrease? Did the average reward increase?*\n- *How does the safety and reliability rating compare to the initial driving agent?*",
"_____no_output_____"
],
[
"**Answer:** \n- Between this simulation and the previous with no learning enabled the rolling average reward is still consistently negative although of a much smaller size and getting better with trials. The safety rating is similar with both having a safety rating of \"F\"\n- By default epsilon tolerance is 0.05, and we reduced the exploration factor by 0.05 each training trial. This corresponds to the 20 training trials performed by the agent, as 1 / 0.05 = 20\n- From the second diagram on the right hand side, we see a plot of paramtere values with the trials. Exploration factor decreases at a constant rate, which is expected as this was reduced by a constant amount following each trial\n- As the number of training trials increased, the number of bad actions decreased significantly to around 11% as seen in the top left plot and the average reward improved significantly shown in the top right plot\n- The reliability has substaintially improved, with a grade of \"D\", this would indicate the agent is effectively learning how to navigate the grid. Perhaps with more trials this could improve much more. The safety rating is still \"F\", but this discounts the fact that the frequency of bad actions has fallen.",
"_____no_output_____"
],
[
"-----\n## Improve the Q-Learning Driving Agent\nThe third step to creating an optimized Q-Learning agent is to perform the optimization! Now that the Q-Learning algorithm is implemented and the driving agent is successfully learning, it's necessary to tune settings and adjust learning paramaters so the driving agent learns both **safety** and **efficiency**. Typically this step will require a lot of trial and error, as some settings will invariably make the learning worse. One thing to keep in mind is the act of learning itself and the time that this takes: In theory, we could allow the agent to learn for an incredibly long amount of time; however, another goal of Q-Learning is to *transition from experimenting with unlearned behavior to acting on learned behavior*. For example, always allowing the agent to perform a random action during training (if $\\epsilon = 1$ and never decays) will certainly make it *learn*, but never let it *act*. When improving on your Q-Learning implementation, consider the impliciations it creates and whether it is logistically sensible to make a particular adjustment.",
"_____no_output_____"
],
[
"### Improved Q-Learning Simulation Results\nTo obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:\n- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.\n- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.\n- `'log_metrics'` - Set this to `True` to log the simluation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.\n- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.\n- `'optimized'` - Set this to `'True'` to tell the driving agent you are performing an optimized version of the Q-Learning implementation.\n\nAdditional flags that can be adjusted as part of optimizing the Q-Learning agent:\n- `'n_test'` - Set this to some positive number (previously 10) to perform that many testing trials.\n- `'alpha'` - Set this to a real number between 0 - 1 to adjust the learning rate of the Q-Learning algorithm.\n- `'epsilon'` - Set this to a real number between 0 - 1 to adjust the starting exploration factor of the Q-Learning algorithm.\n- `'tolerance'` - set this to some small value larger than 0 (default was 0.05) to set the epsilon threshold for testing.\n\nFurthermore, use a decaying function of your choice for $\\epsilon$ (the exploration factor). Note that whichever function you use, it **must decay to **`'tolerance'`** at a reasonable rate**. The Q-Learning agent will not begin testing until this occurs. Some example decaying functions (for $t$, the number of trials):\n\n$$ \\epsilon = a^t, \\textrm{for } 0 < a < 1 \\hspace{50px}\\epsilon = \\frac{1}{t^2}\\hspace{50px}\\epsilon = e^{-at}, \\textrm{for } 0 < a < 1 \\hspace{50px} \\epsilon = \\cos(at), \\textrm{for } 0 < a < 1$$\nYou may also use a decaying function for $\\alpha$ (the learning rate) if you so choose, however this is typically less common. If you do so, be sure that it adheres to the inequality $0 \\leq \\alpha \\leq 1$.\n\nIf you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation! \n\nOnce you have successfully completed the improved Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!",
"_____no_output_____"
]
],
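[
[
"Before committing to a schedule, it can help to sanity-check how quickly each candidate decay function reaches the testing threshold. A standalone sketch (the tolerance of 0.05 and rate a = 0.01 are illustrative values here; the tuned values appear in **Question 7** below):\n\n```python\nimport math\n\ntolerance, a = 0.05, 0.01\nschedules = [('cosine', lambda t: math.cos(a * t)),\n             ('exponential', lambda t: math.exp(-a * t)),\n             ('inverse-square', lambda t: 1.0 / (t * t) if t else 1.0)]\nfor name, eps in schedules:\n    t = 0\n    while eps(t) > tolerance:  # count trials until epsilon drops below tolerance\n        t += 1\n    print(name, ':', t, 'training trials before testing')\n```\n\nWith these values the cosine schedule crosses the threshold after roughly 150 trials, consistent with the training length reported below.",
"_____no_output_____"
]
],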
[
[
"# Load the 'sim_improved-learning' file from the improved Q-Learning simulation\nvs.plot_trials('sim_improved-learning.csv')",
"_____no_output_____"
]
],
[
[
"### Question 7\nUsing the visualization above that was produced from your improved Q-Learning simulation, provide a final analysis and make observations about the improved driving agent like in **Question 6**. Questions you should answer: \n- *What decaying function was used for epsilon (the exploration factor)?*\n- *Approximately how many training trials were needed for your agent before begining testing?*\n- *What epsilon-tolerance and alpha (learning rate) did you use? Why did you use them?*\n- *How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?*\n- *Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy?*\n- *Are you satisfied with the safety and reliability ratings of the *Smartcab*?*",
"_____no_output_____"
],
[
"**Answer:** \n- I use the cosine decay function for epsilon, cos(alpha * trial)\n- The agent completed around 150 training trials before testing\n- I set alpha to 0.01 and the epsilon-tolerance to 0.05. This was to make sure that I got a larger number of trials than the there are states in the sample space - found to be 96 above. This was so my agent could adequetly learn the environment without redundency\n- The safety rating has significantly improved from \"F\" to \"A+\", which would indicate that we are adequetly capturing information about the environment. The reliability rating has also improved to \"A+\" though only from \"D\", which would indicate that it is less influenced by the exploration factor.\n- I think this demonstrates that the agent learned an appropriate policy.\n- I am satisfied with the ratings of the smartcab. More trials could run to improve the average reward per action. Perhpas a different epsilon function could be used to achieve better results within fewer trials to make the learner more scalable.",
"_____no_output_____"
],
[
"### Define an Optimal Policy\n\nSometimes, the answer to the important question *\"what am I trying to get my agent to learn?\"* only has a theoretical answer and cannot be concretely described. Here, however, you can concretely define what it is the agent is trying to learn, and that is the U.S. right-of-way traffic laws. Since these laws are known information, you can further define, for each state the *Smartcab* is occupying, the optimal action for the driving agent based on these laws. In that case, we call the set of optimal state-action pairs an **optimal policy**. Hence, unlike some theoretical answers, it is clear whether the agent is acting \"incorrectly\" not only by the reward (penalty) it receives, but also by pure observation. If the agent drives through a red light, we both see it receive a negative reward but also know that it is not the correct behavior. This can be used to your advantage for verifying whether the **policy** your driving agent has learned is the correct one, or if it is a **suboptimal policy**.",
"_____no_output_____"
],
[
"### Question 8\nProvide a few examples (using the states you've defined) of what an optimal policy for this problem would look like. Afterwards, investigate the `'sim_improved-learning.txt'` text file to see the results of your improved Q-Learning algorithm. _For each state that has been recorded from the simulation, is the **policy** (the action with the highest value) correct for the given state? Are there any states where the policy is different than what would be expected from an optimal policy?_ Provide an example of a state and all state-action rewards recorded, and explain why it is the correct policy.",
"_____no_output_____"
],
[
"**Answer:** \nIn general we can imagine the optimal policy to determine that:\n- The smartcab should respond 'right' if no oncoming traffic is approaching from left through the intersection on a red light \n- The smartcab should respond with action 'None' if it will lead to a bad action with other traffic\n- The smartcab should go in the direction of the waypoint if the lights are green and not obstructed by traffic\n\nAn example of a policy from the Q-Learning algorithm in line with the ideal is the following:\n```\n('forward', 'red', None, None)\n -- forward : -3.97\n -- right : 0.25\n -- None : 1.39\n -- left : -5.08\n```\n\nThat is, with a waypoint of forward, the light are red, and there are no cars near the smartcab. In this case the ideal action of None, has the highest positive weighting, and the two or the most disruptive actions are severly penalised: any movement would be a violation.\n\n\nThe Q-Learning algorithm does not produce ideal policies when there is lots of noise: traffic in all directions. It must be difficult to optimise for these situations given the range of possibilities in a small number of training trials. For example,\n```\n('right', 'red', 'left', 'left')\n -- forward : -0.21\n -- right : 0.06\n -- None : 0.00\n -- left : -0.10\n```\n\nWhere the waypoint is to the right, the agent is at a red light, and there is traffic in all directions. In this case we would expect that the ideal policy would strongly be a None action.\nAlso, on a red light, a right turn is permitted if no oncoming traffic is approaching from your left through the intersection.",
"_____no_output_____"
],
[
"-----\n### Optional: Future Rewards - Discount Factor, `'gamma'`\nCuriously, as part of the Q-Learning algorithm, you were asked to **not** use the discount factor, `'gamma'` in the implementation. Including future rewards in the algorithm is used to aid in propogating positive rewards backwards from a future state to the current state. Essentially, if the driving agent is given the option to make several actions to arrive at different states, including future rewards will bias the agent towards states that could provide even more rewards. An example of this would be the driving agent moving towards a goal: With all actions and rewards equal, moving towards the goal would theoretically yield better rewards if there is an additional reward for reaching the goal. However, even though in this project, the driving agent is trying to reach a destination in the allotted time, including future rewards will not benefit the agent. In fact, if the agent were given many trials to learn, it could negatively affect Q-values!",
"_____no_output_____"
],
[
"### Optional Question 9\n*There are two characteristics about the project that invalidate the use of future rewards in the Q-Learning algorithm. One characteristic has to do with the *Smartcab* itself, and the other has to do with the environment. Can you figure out what they are and why future rewards won't work for this project?*",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
],
[
"> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
d0d45ff81744d440c038d314d846878060c0c4a4 | 589,390 | ipynb | Jupyter Notebook | notebooks/computer_science/Algorithms_and_data_structures_in_python/maps_and_dictionaries/example_1.ipynb | NathanielDake/NathanielDake.github.io | 82b7013afa66328e06e51304b6af10e1ed648eb8 | [
"MIT"
] | 3 | 2018-03-30T06:28:21.000Z | 2018-04-25T15:43:24.000Z | notebooks/computer_science/Algorithms_and_data_structures_in_python/maps_and_dictionaries/example_1.ipynb | NathanielDake/NathanielDake.github.io | 82b7013afa66328e06e51304b6af10e1ed648eb8 | [
"MIT"
] | null | null | null | notebooks/computer_science/Algorithms_and_data_structures_in_python/maps_and_dictionaries/example_1.ipynb | NathanielDake/NathanielDake.github.io | 82b7013afa66328e06e51304b6af10e1ed648eb8 | [
"MIT"
] | 3 | 2018-02-07T22:21:33.000Z | 2018-05-04T20:16:43.000Z | 1,137.818533 | 478,568 | 0.955004 | [
[
[
"# Hash Codes\nConsider the challenges associated with the 16-bit hashcode for a character string `s` that sums the Unicode values of the characters in `s`.\n\nFor example, let `s = \"stop\"`. It's unicode character representation is:",
"_____no_output_____"
]
],
[
[
"for char in \"stop\":\n print(char + ': ' + str(ord(char)))\n",
"s: 115\nt: 116\no: 111\np: 112\n"
],
[
"sum([ord(x) for x in \"stop\"])",
"_____no_output_____"
]
],
[
[
"If we then sum these unicode values, we arrive as the following hash code:\n\n```\nstop -----------> 454\n```\n\nThe problem is, the following strings will all map to the same value!\n\n```\nstop -----------> 454\npots -----------> 454\ntops -----------> 454\nspot -----------> 454\n```\n\nA better hash code would take into account the _position_ of our characters.\n\n## Polynomial Hash code\nIf we refer to the characters of our string as $x_0, x_1, \\dots, x_n$, we can then chose a non-zero constant, $a \\neq 1$, and use a hash code:\n\n$$a^{n-1} x_0 + a^{n-2} x_1 + \\dots + a^1 x_{n-1} + a^0 x_{n}$$\n\nThis is simply a polynomial in $a$ that has our $x_i$ values as it's coefficients. This is known as a **polynomial** hash code.",
"_____no_output_____"
]
],
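[
[
"# A direct way to compute the polynomial hash code, using Horner's rule so the\n# powers of `a` never have to be built explicitly. NOTE: a = 33 is only an\n# illustrative choice of constant, not one prescribed by the text above.\ndef polynomial_hash(s, a=33):\n    h = 0\n    for ch in s:\n        h = h * a + ord(ch)  # each earlier character picks up another factor of a\n    return h\n\nfor word in ('stop', 'pots', 'tops', 'spot'):\n    print(word, polynomial_hash(word))  # the anagrams now hash to distinct values",
"_____no_output_____"
]
],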
[
[
"1 << 32",
"_____no_output_____"
],
[
"2**32",
"_____no_output_____"
],
[
"2 << 2",
"_____no_output_____"
]
],
[
[
"## Investigate hash map uniformity",
"_____no_output_____"
]
],
[
[
"import random\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%config InlineBackend.figure_format='retina'",
"_____no_output_____"
],
[
"n = 0\nprime = 109345121\nscale = 1 + random.randrange(prime - 1)\nshift = random.randrange(prime)",
"_____no_output_____"
],
[
"def my_hash_func(k, upper):\n table = upper * [None]\n \n\n \n hash_code = hash(k)\n \n compressed_code = (hash_code * scale + shift) % prime % len(table)\n return compressed_code",
"_____no_output_____"
],
[
"\nupper = 1000\ninputs = list(range(0, upper))\nhash_results = []\nfor i in inputs:\n hash_results.append(my_hash_func(i, upper))",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,10))\nplt.plot(inputs, hash_results)",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,10))\nplt.scatter(inputs, hash_results)",
"_____no_output_____"
],
[
"def moving_average(x, w):\n return np.convolve(x, np.ones(w), 'valid') / w",
"_____no_output_____"
],
[
"averages_over_window_size_5 = moving_average(hash_results, 5)",
"_____no_output_____"
],
[
"plt.hist(averages_over_window_size_5)",
"_____no_output_____"
],
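[
"# Added check: summarize how evenly the MAD-compressed codes spread across\n# buckets. Many distinct buckets and a small maximum load suggest uniformity.\nfrom collections import Counter\n\ncounts = Counter(hash_results)\nprint('distinct buckets used:', len(counts))\nprint('max bucket load:', max(counts.values()))",
"_____no_output_____"
],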
[
"l = [4, 7, 9, 13, 1, 3, 7]",
"_____no_output_____"
],
[
"l1 = [1, 4, 7]; l2 = [3, 9, 13] ",
"_____no_output_____"
],
[
"def merge_sort(l):\n\n size = len(l)\n midway = size // 2\n first_half = l[:midway]\n second_half = l[midway:]\n\n if len(first_half) > 1 or len(second_half) > 1:\n sorted_first_half = merge_sort(first_half)\n sorted_second_half = merge_sort(second_half)\n else:\n sorted_first_half = first_half\n sorted_second_half = second_half\n \n sorted_l = merge(sorted_first_half, sorted_second_half)\n return sorted_l\n\ndef merge(l1, l2):\n \"\"\"Merge two sorted lists.\"\"\"\n\n i = 0 \n j = 0\n\n lmerged = []\n\n while (i <= len(l1) - 1) or (j <= len(l2) - 1):\n if i == len(l1):\n lmerged.extend(l2[j:])\n break\n if j == len(l2):\n lmerged.extend(l1[i:])\n break\n if (i < len(l1)) and (l1[i] < l2[j]):\n lmerged.append(l1[i])\n i += 1\n else:\n lmerged.append(l2[j])\n j += 1\n \n return lmerged\n",
"_____no_output_____"
],
[
"merge_sort(l)",
"_____no_output_____"
],
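[
"# Sanity check: merge_sort should agree with the built-in sorted() on random inputs.\nimport random\n\nfor _ in range(100):\n    trial = [random.randrange(100) for _ in range(random.randrange(30))]\n    assert merge_sort(trial) == sorted(trial)\nprint('merge_sort matches sorted() on 100 random lists')",
"_____no_output_____"
],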
[
"l = [random.choice(list(range(1000))) for x in range(1000)]",
"_____no_output_____"
],
[
"%%time\nres = sorted(l)",
"CPU times: user 134 µs, sys: 6 µs, total: 140 µs\nWall time: 142 µs\n"
],
[
"%%time\nres = merge_sort(l)",
"CPU times: user 6.33 ms, sys: 413 µs, total: 6.74 ms\nWall time: 6.4 ms\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d46a3021d9df155b77026c781f50a0d530c034 | 7,527 | ipynb | Jupyter Notebook | lessons/landlab/overland_flow_driver_for_espin.ipynb | BCampforts/espin | 376e98eed45352af6b4f66345bec1f5792a64124 | [
"MIT"
] | null | null | null | lessons/landlab/overland_flow_driver_for_espin.ipynb | BCampforts/espin | 376e98eed45352af6b4f66345bec1f5792a64124 | [
"MIT"
] | null | null | null | lessons/landlab/overland_flow_driver_for_espin.ipynb | BCampforts/espin | 376e98eed45352af6b4f66345bec1f5792a64124 | [
"MIT"
] | null | null | null | 25.68942 | 248 | 0.575262 | [
[
[
"<a href=\"http://landlab.github.io\"><img style=\"float: left\" src=\"../../media/landlab_header.png\"></a>",
"_____no_output_____"
],
[
"# The deAlmeida Overland Flow Component ",
"_____no_output_____"
],
[
"<hr>\n<small>For more Landlab tutorials, click here: <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>\n<hr>",
"_____no_output_____"
],
[
"This notebook illustrates running the deAlmeida overland flow component in an extremely simple-minded way on a real topography, then shows it creating a flood sequence along an inclined surface with an oscillating water surface at one end.\n\nFirst, import what we'll need:",
"_____no_output_____"
]
],
[
[
"from landlab.components.overland_flow import OverlandFlow\nfrom landlab.plot.imshow import imshow_grid\nfrom landlab.plot.colors import water_colormap\nfrom landlab import RasterModelGrid\nfrom landlab.io.esri_ascii import read_esri_ascii\nfrom matplotlib.pyplot import figure\nimport numpy as np\nfrom time import time\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"Pick the initial and run conditions",
"_____no_output_____"
]
],
[
[
"run_time = 100 # duration of run, (s)\nh_init = 0.1 # initial thin layer of water (m)\nn = 0.01 # roughness coefficient, (s/m^(1/3))\ng = 9.8 # gravity (m/s^2)\nalpha = 0.7 # time-step factor (nondimensional; from Bates et al., 2010)\nu = 0.4 # constant velocity (m/s, de Almeida et al., 2012)\nrun_time_slices = (10, 50, 100)",
"_____no_output_____"
]
],
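[
[
"# For reference, the adaptive time step used for stability follows Bates et al.\n# (2010): dt = alpha * dx / sqrt(g * h_max). This hand-rolled sketch is only\n# illustrative -- the run loop below uses the component's own calc_time_step() --\n# and the grid spacing dx = 30.0 m is an assumed value, not read from the DEM.\nimport numpy as np\n\ndef bates_time_step(alpha, dx, h_max, g=9.8):\n    return alpha * dx / np.sqrt(g * h_max)\n\nbates_time_step(alpha=0.7, dx=30.0, h_max=0.1)",
"_____no_output_____"
]
],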
[
[
"Elapsed time starts at 1 second. This prevents errors when setting our boundary conditions.",
"_____no_output_____"
]
],
[
[
"elapsed_time = 1.0",
"_____no_output_____"
]
],
[
[
"Use Landlab methods to import an ARC ascii grid, and load the data into the field that the component needs to look at to get the data. This loads the elevation data, z, into a \"field\" in the grid itself, defined on the nodes.",
"_____no_output_____"
]
],
[
[
"rmg, z = read_esri_ascii('Square_TestBasin.asc', name='topographic__elevation')\nrmg.set_closed_boundaries_at_grid_edges(True, True, True, True)\n\n# un-comment these two lines for a \"real\" DEM\n#rmg, z = read_esri_ascii('hugo_site.asc', name='topographic__elevation') \n#rmg.status_at_node[z<0.0] = rmg.BC_NODE_IS_CLOSED\n",
"_____no_output_____"
]
],
[
[
"We can get at this data with this syntax:",
"_____no_output_____"
]
],
[
[
"np.all(rmg.at_node['topographic__elevation'] == z)",
"_____no_output_____"
]
],
[
[
"Note that the boundary conditions for this grid mainly got handled with the final line of those three, but for the sake of completeness, we should probably manually \"open\" the outlet. We can find and set the outlet like this:",
"_____no_output_____"
]
],
[
[
"my_outlet_node = 100 # This DEM was generated using Landlab and the outlet node ID was known\nrmg.status_at_node[my_outlet_node] = rmg.BC_NODE_IS_FIXED_VALUE",
"_____no_output_____"
]
],
[
[
"Now initialize a couple more grid fields that the component is going to need:",
"_____no_output_____"
]
],
[
[
"rmg.add_zeros('surface_water__depth', at='node') # water depth (m)",
"_____no_output_____"
],
[
"rmg.at_node['surface_water__depth'] += h_init",
"_____no_output_____"
]
],
[
[
"Let's look at our watershed topography",
"_____no_output_____"
]
],
[
[
"imshow_grid(rmg, 'topographic__elevation') #, vmin=1650.0)",
"_____no_output_____"
]
],
[
[
"Now instantiate the component itself",
"_____no_output_____"
]
],
[
[
"of = OverlandFlow(\n rmg, steep_slopes=True\n) #for stability in steeper environments, we set the steep_slopes flag to True",
"_____no_output_____"
]
],
[
[
"Now we're going to run the loop that drives the component:",
"_____no_output_____"
]
],
[
[
"while elapsed_time < run_time:\n # First, we calculate our time step.\n dt = of.calc_time_step()\n # Now, we can generate overland flow.\n of.overland_flow()\n # Increased elapsed time\n print('Elapsed time: ', elapsed_time)\n elapsed_time += dt",
"_____no_output_____"
],
[
"imshow_grid(rmg, 'surface_water__depth', cmap='Blues')",
"_____no_output_____"
]
],
[
[
"Now let's get clever, and run a set of time slices:",
"_____no_output_____"
]
],
[
[
"elapsed_time = 1.\nfor t in run_time_slices:\n while elapsed_time < t:\n # First, we calculate our time step.\n dt = of.calc_time_step()\n # Now, we can generate overland flow.\n of.overland_flow()\n # Increased elapsed time\n elapsed_time += dt\n figure(t)\n imshow_grid(rmg, 'surface_water__depth', cmap='Blues')",
"_____no_output_____"
]
],
[
[
"### Click here for more <a href=\"https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html\">Landlab tutorials</a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d487f51e1a435369012e63886af95c99a811d1 | 29,946 | ipynb | Jupyter Notebook | phy2nn_battery.ipynb | Yuanyuan-Shi/batterydeg | bfa3f64b40e83d7b7277242139729eded83c4d62 | [
"MIT"
] | 5 | 2020-06-19T06:07:57.000Z | 2022-01-21T09:32:02.000Z | phy2nn_battery.ipynb | Yuanyuan-Shi/batterydeg | bfa3f64b40e83d7b7277242139729eded83c4d62 | [
"MIT"
] | null | null | null | phy2nn_battery.ipynb | Yuanyuan-Shi/batterydeg | bfa3f64b40e83d7b7277242139729eded83c4d62 | [
"MIT"
] | null | null | null | 38.941482 | 87 | 0.463401 | [
[
[
"import tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import datasets, linear_model\nfrom sklearn import cross_validation\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing",
"_____no_output_____"
],
[
"df = pd.read_excel(\"data0505.xlsx\",header=0)\n# clean up data\ndf = df.dropna(how = 'all')\ndf = df.fillna(0)\ndf = df.round(4)\ndf=df[df['Power']>=0]\ndf.head()",
"_____no_output_____"
],
[
"min_max_scaler = preprocessing.MinMaxScaler()\nnp_scaled = min_max_scaler.fit_transform(df)\ndf_normalized = pd.DataFrame(np_scaled)\ndf_normalized.head()",
"_____no_output_____"
],
[
"x = np.array(df_normalized.ix[:,0:2])#first three column are SoC, SoH, power\ny = np.array(df_normalized.ix[:,5])#delta SEI\nX_train, X_test, Y_train, Y_test = cross_validation.train_test_split(\nx, y, test_size=0.2, random_state=42)\ntotal_len = X_train.shape[0]\ntotal_len",
"_____no_output_____"
],
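[
"# Helper sketch: predictions come out in MinMax-normalized space, so mapping\n# them back to physical delta-SEI units inverts x_norm = (x - min) / (max - min).\n# Assumption: column 5 of the original frame is delta SEI, matching the slice above.\n# (min_max_scaler.inverse_transform would also work, but expects all columns at once.)\nsei_col = 5\nsei_min = min_max_scaler.data_min_[sei_col]\nsei_max = min_max_scaler.data_max_[sei_col]\n\ndef denormalize_sei(pred_norm):\n    return pred_norm * (sei_max - sei_min) + sei_min",
"_____no_output_____"
],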
[
"# Parameters\nlearning_rate = 0.001\ntraining_epochs = 50\nbatch_size = 100\ndisplay_step = 1\ndropout_rate = 0.1\n# Network Parameters\nn_hidden_1 = 10 # 1st layer number of features\nn_hidden_2 = 5 # 2nd layer number of features\nn_input = X_train.shape[1]\nn_classes = 1\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, 3])\ny = tf.placeholder(\"float\", [None])",
"_____no_output_____"
],
[
"# Create model\ndef multilayer_perceptron(x, weights, biases):\n # Hidden layer with RELU activation\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n\n # Hidden layer with RELU activation\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n\n # Output layer with linear activation\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer",
"_____no_output_____"
],
[
"# Store layers weight & bias\nweights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], 0, 0.1))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),\n 'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))\n}",
"_____no_output_____"
],
[
"# Construct model\npred = multilayer_perceptron(x, weights, biases)",
"_____no_output_____"
],
[
"# Define loss and optimizer\ncost = tf.reduce_mean((tf.transpose(pred)-y)*(tf.transpose(pred)-y)) \noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n tf.initialize_all_variables()\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(total_len/batch_size)\n # Loop over all batches\n for i in range(total_batch-1):\n batch_x = X_train[i*batch_size:(i+1)*batch_size]\n batch_y = Y_train[i*batch_size:(i+1)*batch_size]\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,\n y: batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n\n # sample prediction\n label_value = batch_y\n estimate = p\n err = label_value-estimate\n print (\"num batch:\", total_batch)\n\n # Display logs per epoch step\n if epoch % display_step == 0:\n print (\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n \"{:.9f}\".format(avg_cost))\n print (\"[*]----------------------------\")\n for i in range(3):\n print (\"label value:\", label_value[i], \\\n \"estimated value:\", estimate[i])\n print (\"[*]============================\")\n\n print (\"Optimization Finished!\")\n \n # Test model\n # correct_prediction = tf.equal(tf.argmax(pred,0), tf.argmax(y,0))\n # Calculate accuracy\n accuracy = tf.reduce_mean((tf.transpose(pred)-y)*(tf.transpose(pred)-y)) \n print (\"MSE:\", accuracy.eval({x: X_test, y: Y_test}))",
"num batch: 48\nEpoch: 0001 cost= 0.653324438\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.07771525]\nlabel value: 0.788510037923 estimated value: [ 0.08068381]\nlabel value: 0.75568544624 estimated value: [ 0.07602236]\n[*]============================\nnum batch: 48\nEpoch: 0002 cost= 0.424272126\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.35695586]\nlabel value: 0.788510037923 estimated value: [ 0.36942384]\nlabel value: 0.75568544624 estimated value: [ 0.32976228]\n[*]============================\nnum batch: 48\nEpoch: 0003 cost= 0.094070199\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.82547671]\nlabel value: 0.788510037923 estimated value: [ 0.85426039]\nlabel value: 0.75568544624 estimated value: [ 0.74013388]\n[*]============================\nnum batch: 48\nEpoch: 0004 cost= 0.010735776\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.88168484]\nlabel value: 0.788510037923 estimated value: [ 0.90648782]\nlabel value: 0.75568544624 estimated value: [ 0.78530633]\n[*]============================\nnum batch: 48\nEpoch: 0005 cost= 0.009506162\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.87628341]\nlabel value: 0.788510037923 estimated value: [ 0.8940109]\nlabel value: 0.75568544624 estimated value: [ 0.77669036]\n[*]============================\nnum batch: 48\nEpoch: 0006 cost= 0.008579969\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.87534475]\nlabel value: 0.788510037923 estimated value: [ 0.88600343]\nlabel value: 0.75568544624 estimated value: [ 0.77202308]\n[*]============================\nnum batch: 48\nEpoch: 0007 cost= 0.007678320\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.87415296]\nlabel value: 0.788510037923 estimated value: [ 0.87772399]\nlabel value: 0.75568544624 estimated value: [ 0.7673378]\n[*]============================\nnum batch: 48\nEpoch: 0008 cost= 0.006822387\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.87283683]\nlabel value: 0.788510037923 estimated value: [ 0.86942708]\nlabel value: 0.75568544624 estimated value: [ 0.762833]\n[*]============================\nnum batch: 48\nEpoch: 0009 cost= 0.006023696\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.87143499]\nlabel value: 0.788510037923 estimated value: [ 0.86124712]\nlabel value: 0.75568544624 estimated value: [ 0.75860649]\n[*]============================\nnum batch: 48\nEpoch: 0010 cost= 0.005289455\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.86995476]\nlabel value: 0.788510037923 estimated value: [ 0.85326779]\nlabel value: 0.75568544624 estimated value: [ 0.75471181]\n[*]============================\nnum batch: 48\nEpoch: 0011 cost= 0.004623550\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.86841047]\nlabel value: 0.788510037923 estimated value: [ 0.84556603]\nlabel value: 0.75568544624 estimated value: [ 0.75119466]\n[*]============================\nnum batch: 48\nEpoch: 0012 cost= 0.004027425\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.86681861]\nlabel value: 0.788510037923 estimated value: [ 0.83820951]\nlabel value: 0.75568544624 estimated value: [ 0.74808908]\n[*]============================\nnum batch: 48\nEpoch: 0013 cost= 
0.003500679\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.8651973]\nlabel value: 0.788510037923 estimated value: [ 0.83125806]\nlabel value: 0.75568544624 estimated value: [ 0.74541897]\n[*]============================\n[... epochs 0014-0049 trimmed for brevity: 48 batches each; the cost falls and plateaus near 0.001076 ...]\nnum batch: 48\nEpoch: 0050 cost= 0.001077051\n[*]----------------------------\nlabel value: 0.844460892497 estimated value: [ 0.8462379]\nlabel value: 0.788510037923 estimated value: [ 0.78256404]\nlabel value: 0.75568544624 estimated value: [ 0.74471509]\n[*]============================\nOptimization Finished!\nMSE: 0.00165942\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d49f085b6cb808ae48928066cb385fc04ad36b | 231,783 | ipynb | Jupyter Notebook | 1_exact_diagonalization/sol4_simple_ED.ipynb | harrow/computational_QM | d8a2060bee6a55c4b8bf937ad96feed10227373b | [
"MIT"
] | 25 | 2019-03-07T08:41:24.000Z | 2022-02-19T21:31:11.000Z | 1_exact_diagonalization/sol4_simple_ED.ipynb | harrow/computational_QM | d8a2060bee6a55c4b8bf937ad96feed10227373b | [
"MIT"
] | 2 | 2019-05-15T12:30:41.000Z | 2019-07-03T18:08:12.000Z | 1_exact_diagonalization/sol4_simple_ED.ipynb | harrow/computational_QM | d8a2060bee6a55c4b8bf937ad96feed10227373b | [
"MIT"
] | 9 | 2018-12-04T21:01:33.000Z | 2021-04-27T15:35:31.000Z | 511.662252 | 93,270 | 0.932514 | [
[
[
"import numpy as np\nimport scipy\nfrom scipy import sparse\nimport scipy.sparse.linalg\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"# part a)\nId = sparse.csr_matrix(np.eye(2))\nSx = sparse.csr_matrix([[0., 1.], [1., 0.]])\nSz = sparse.csr_matrix([[1., 0.], [0., -1.]])\nprint(Sz.shape)",
"(2, 2)\n"
],
[
"# part b)\ndef singesite_to_full(op, i, L):\n op_list = [Id]*L # = [Id, Id, Id ...] with L entries\n op_list[i] = op\n full = op_list[0]\n for op_i in op_list[1:]:\n full = sparse.kron(full, op_i, format=\"csr\")\n return full\n\ndef gen_sx_list(L):\n return [singesite_to_full(Sx, i, L) for i in range(L)]\n\n# part c)\n\ndef gen_sz_list(L):\n return [singesite_to_full(Sz, i, L) for i in range(L)]",
"_____no_output_____"
],
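[
"# Editor's aside, not part of the original exercise: a quick sanity check of the\n# Kronecker-product embedding above. For L=2, acting on site 0 or site 1 should\n# reproduce kron(Sz, Id) and kron(Id, Sz), respectively.\nassert np.allclose(singlesite_to_full(Sz, 0, 2).toarray(), np.kron(Sz.toarray(), np.eye(2)))\nassert np.allclose(singlesite_to_full(Sz, 1, 2).toarray(), np.kron(np.eye(2), Sz.toarray()))\nprint(\"embedding check passed\")",
"_____no_output_____"
],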
[
"# part d)\n\ndef gen_hamiltonian(sx_list, sz_list, g, J=1.):\n L = len(sx_list)\n H = sparse.csr_matrix((2**L, 2**L))\n for j in range(L):\n H = H - J *( sx_list[j] * sx_list[(j+1)%L])\n H = H - g * sz_list[j]\n return H",
"_____no_output_____"
],
[
"# check in part d)\nL = 2\nsx_list = gen_sx_list(L)\nsz_list = gen_sz_list(L)\nH = gen_hamiltonian(sx_list, sz_list, 0.1)\nprint(\"H for L=2, g=0.1\")\nprint(H.toarray())",
"H for L=2, g=0.1\n[[-0.2 0. 0. -2. ]\n [ 0. 0. -2. 0. ]\n [ 0. -2. 0. 0. ]\n [-2. 0. 0. 0.2]]\n"
],
[
"# part e)\nL = 12\nsx_list = gen_sx_list(L)\nsz_list = gen_sz_list(L)\nH = gen_hamiltonian(sx_list, sz_list, 1.)\nHdense = H.toarray()\nprint(\"L =12: H =\", repr(H))",
"L =12: H = <4096x4096 sparse matrix of type '<class 'numpy.float64'>'\n\twith 52324 stored elements in Compressed Sparse Row format>\n"
],
[
"%%timeit\nsparse.linalg.eigsh(H, which='SA')",
"91.5 ms ± 18.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"%%timeit\nnp.linalg.eigh(Hdense)",
"17.6 s ± 1.43 s per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"# part f)\n\nLs = [6, 8, 10, 12]\ngs = np.linspace(0., 2., 21)\n\nplt.figure()\nfor L in Ls:\n sx_list = gen_sx_list(L)\n sz_list = gen_sz_list(L)\n sxsx = sx_list[0]*sx_list[L//2]\n corrs = []\n for g in gs:\n H = gen_hamiltonian(sx_list, sz_list, g, J=1.)\n E, v = sparse.linalg.eigsh(H, k=3, which='SA')\n v0 = v[:, 0] # first column of v is the ground state\n corr = np.inner(v0, sxsx*v0)\n corrs.append(corr)\n corrs = np.array(corrs)\n plt.plot(gs, corrs, label=\"L={L:d}\".format(L=L))\nplt.xlabel(\"g\")\nplt.ylabel(\"C\")\nplt.legend()",
"_____no_output_____"
],
[
"# part g)\nplt.figure(figsize=(10, 8))\nfor L in [6, 8, 10, 12]:\n sx_list = gen_sx_list(L)\n sz_list = gen_sz_list(L)\n gaps = []\n for g in gs:\n H = gen_hamiltonian(sx_list, sz_list, g, J=1.)\n E, v = sparse.linalg.eigsh(H, k=3, which='SA')\n gaps.append((E[1] - E[0], E[2] - E[0]))\n\n gaps = np.array(gaps)\n lines = plt.plot(gs, gaps[:, 0], linestyle='-', label=\"first excited state, L={L:d}\".format(L=L))\n plt.plot(gs, gaps[:, 1], color = lines[0].get_color(), linestyle='--', label=\"second excited state, L={L:d}\".format(L=L))\nplt.legend()",
"_____no_output_____"
],
[
"# just for fun: regenerate the correlation plot with open boundary conditions\n\ndef gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.):\n L = len(sx_list)\n H = sparse.csr_matrix((2**L, 2**L))\n for j in range(L):\n if j < L-1:\n H = H - J *( sx_list[j] * sx_list[j+1])\n H = H - g * sz_list[j]\n return H\n\nplt.figure()\nfor L in Ls:\n sx_list = gen_sx_list(L)\n sz_list = gen_sz_list(L)\n sxsx = sx_list[0]*sx_list[L//2]\n corrs = []\n for g in gs:\n H = gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.)\n E, v = sparse.linalg.eigsh(H, k=3, which='SA')\n v0 = v[:, 0] # first column of v is the ground state\n corr = np.inner(v0, sxsx*v0)\n corrs.append(corr)\n corrs = np.array(corrs)\n plt.plot(gs, corrs, label=\"L={L:d}\".format(L=L))\nplt.xlabel(\"g\")\nplt.ylabel(\"C\")\nplt.legend()",
"_____no_output_____"
],
[
"# and the plot for the excitation energies for open b.c.\n\nplt.figure(figsize=(10, 8))\nfor L in [6, 8, 10, 12]:\n sx_list = gen_sx_list(L)\n sz_list = gen_sz_list(L)\n gaps = []\n for g in gs:\n H = gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.)\n E, v = sparse.linalg.eigsh(H, k=3, which='SA')\n gaps.append((E[1] - E[0], E[2] - E[0]))\n\n gaps = np.array(gaps)\n lines = plt.plot(gs, gaps[:, 0], linestyle='-', label=\"first excited state, L={L:d}\".format(L=L))\n plt.plot(gs, gaps[:, 1], color = lines[0].get_color(), linestyle='--', label=\"second excited state, L={L:d}\".format(L=L))\nplt.legend()",
"_____no_output_____"
],
[
"# For comparison on the next sheet:\nL = 10\nsx_list = gen_sx_list(L)\nsz_list = gen_sz_list(L)\nH = gen_hamiltonian(sx_list, sz_list, g=0.1, J=1.)\nE, v = sparse.linalg.eigsh(H, k=3, which='SA')\nprint(E[0])",
"-10.02501566423431\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d4a5c2b134fe7a5c0aebfa8d8b278ad3998d10 | 44,976 | ipynb | Jupyter Notebook | Tutorials/Boston Housing - XGBoost (Deploy) - Low Level.ipynb | karimhamdi/sagemaker-deployment | 8d330c883ed4ef1b374a851790674b7c6442cb63 | [
"MIT"
] | null | null | null | Tutorials/Boston Housing - XGBoost (Deploy) - Low Level.ipynb | karimhamdi/sagemaker-deployment | 8d330c883ed4ef1b374a851790674b7c6442cb63 | [
"MIT"
] | null | null | null | Tutorials/Boston Housing - XGBoost (Deploy) - Low Level.ipynb | karimhamdi/sagemaker-deployment | 8d330c883ed4ef1b374a851790674b7c6442cb63 | [
"MIT"
] | null | null | null | 62.640669 | 14,460 | 0.73457 | [
[
[
"# Predicting Boston Housing Prices\n\n## Using XGBoost in SageMaker (Deploy)\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nAs an introduction to using SageMaker's Low Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass.\n\nThe documentation reference for the API used in this notebook is the [SageMaker Developer's Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/)\n\n## General Outline\n\nTypically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nIn this notebook we will be skipping step 5, testing the model. We will still test the model but we will do so by first deploying it and then sending the test data to the deployed model.",
"_____no_output_____"
],
[
"## Step 0: Setting up the notebook\n\nWe begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport os\n\nimport time\nfrom time import gmtime, strftime\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_boston\nimport sklearn.model_selection",
"_____no_output_____"
]
],
[
[
"In addition to the modules above, we need to import the various bits of SageMaker that we will be using. ",
"_____no_output_____"
]
],
[
[
"import sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\n\n# This is an object that represents the SageMaker session that we are currently operating in. This\n# object contains some useful information that we will need to access later such as our region.\nsession = sagemaker.Session()\n\n# This is an object that represents the IAM role that we are currently assigned. When we construct\n# and launch the training job later we will need to tell it what IAM role it should have. Since our\n# use case is relatively simple we will simply assign the training job the role we currently have.\nrole = get_execution_role()",
"_____no_output_____"
]
],
[
[
"## Step 1: Downloading the data\n\nFortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward.",
"_____no_output_____"
]
],
[
[
"boston = load_boston()",
"_____no_output_____"
]
],
[
[
"## Step 2: Preparing and splitting the data\n\nGiven that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets.",
"_____no_output_____"
]
],
[
[
"# First we package up the input data and the target variable (the median value) as pandas dataframes. This\n# will make saving the data to a file a little easier later on.\n\nX_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names)\nY_bos_pd = pd.DataFrame(boston.target)\n\n# We split the dataset into 2/3 training and 1/3 testing sets.\nX_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33)\n\n# Then we split the training set further into 2/3 training and 1/3 validation sets.\nX_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)",
"_____no_output_____"
]
],
[
[
"## Step 3: Uploading the training and validation files to S3\n\nWhen a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details.\n\n### Save the data locally\n\nFirst we need to create the train and validation csv files which we will then upload to S3.",
"_____no_output_____"
]
],
[
[
"# This is our local data directory. We need to make sure that it exists.\ndata_dir = '../data/boston'\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)",
"_____no_output_____"
],
[
"# We use pandas to save our train and validation data to csv files. Note that we make sure not to include header\n# information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed\n# that the first entry in each row is the target variable.\n\npd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)\npd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Upload to S3\n\nSince we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project.",
"_____no_output_____"
]
],
[
[
"prefix = 'boston-xgboost-deploy-ll'\n\nval_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)\ntrain_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"## Step 4: Train and construct the XGBoost model\n\nNow that we have the training and validation data uploaded to S3, we can construct a training job for our XGBoost model and build the model itself.\n\n### Set up the training job\n\nFirst, we will set up and execute a training job for our model. To do this we need to specify some information that SageMaker will use to set up and properly execute the computation. For additional documentation on constructing a training job, see the [CreateTrainingJob API](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) reference.",
"_____no_output_____"
]
],
[
[
"# We will need to know the name of the container that we want to use for training. SageMaker provides\n# a nice utility method to construct this for us.\ncontainer = get_image_uri(session.boto_region_name, 'xgboost', '0.90-1')\n\n# We now specify the parameters we wish to use for our training job\ntraining_params = {}\n\n# We need to specify the permissions that this training job will have. For our purposes we can use\n# the same permissions that our current SageMaker session has.\ntraining_params['RoleArn'] = role\n\n# Here we describe the algorithm we wish to use. The most important part is the container which\n# contains the training code.\ntraining_params['AlgorithmSpecification'] = {\n \"TrainingImage\": container,\n \"TrainingInputMode\": \"File\"\n}\n\n# We also need to say where we would like the resulting model artifacst stored.\ntraining_params['OutputDataConfig'] = {\n \"S3OutputPath\": \"s3://\" + session.default_bucket() + \"/\" + prefix + \"/output\"\n}\n\n# We also need to set some parameters for the training job itself. Namely we need to describe what sort of\n# compute instance we wish to use along with a stopping condition to handle the case that there is\n# some sort of error and the training script doesn't terminate.\ntraining_params['ResourceConfig'] = {\n \"InstanceCount\": 1,\n \"InstanceType\": \"ml.m4.xlarge\",\n \"VolumeSizeInGB\": 5\n}\n \ntraining_params['StoppingCondition'] = {\n \"MaxRuntimeInSeconds\": 86400\n}\n\n# Next we set the algorithm specific hyperparameters. You may wish to change these to see what effect\n# there is on the resulting model.\ntraining_params['HyperParameters'] = {\n \"max_depth\": \"5\",\n \"eta\": \"0.2\",\n \"gamma\": \"4\",\n \"min_child_weight\": \"6\",\n \"subsample\": \"0.8\",\n \"objective\": \"reg:squarederror\",\n \"early_stopping_rounds\": \"10\",\n \"num_round\": \"200\"\n}\n\n# Now we need to tell SageMaker where the data should be retrieved from.\ntraining_params['InputDataConfig'] = [\n {\n \"ChannelName\": \"train\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": train_location,\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"csv\",\n \"CompressionType\": \"None\"\n },\n {\n \"ChannelName\": \"validation\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": val_location,\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"csv\",\n \"CompressionType\": \"None\"\n }\n]",
"_____no_output_____"
]
],
[
[
"### Execute the training job\n\nNow that we've built the dict containing the training job parameters, we can ask SageMaker to execute the job.",
"_____no_output_____"
]
],
[
[
"# First we need to choose a training job name. This is useful for if we want to recall information about our\n# training job at a later date. Note that SageMaker requires a training job name and that the name needs to\n# be unique, which we accomplish by appending the current timestamp.\ntraining_job_name = \"boston-xgboost-\" + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\ntraining_params['TrainingJobName'] = training_job_name\n\n# And now we ask SageMaker to create (and execute) the training job\ntraining_job = session.sagemaker_client.create_training_job(**training_params)",
"_____no_output_____"
]
],
[
[
"The training job has now been created by SageMaker and is currently running. Since we need the output of the training job, we may wish to wait until it has finished. We can do so by asking SageMaker to output the logs generated by the training job and continue doing so until the training job terminates.",
"_____no_output_____"
]
],
[
[
"session.logs_for_job(training_job_name, wait=True)",
"2020-06-24 14:07:15 Starting - Launching requested ML instances.........\n2020-06-24 14:08:20 Starting - Preparing the instances for training...\n2020-06-24 14:09:10 Downloading - Downloading input data...\n2020-06-24 14:09:27 Training - Downloading the training image..\u001b[34mINFO:sagemaker-containers:Imported framework sagemaker_xgboost_container.training\u001b[0m\n\u001b[34mINFO:sagemaker-containers:Failed to parse hyperparameter objective value reg:squarederror to Json.\u001b[0m\n\u001b[34mReturning the value itself\u001b[0m\n\u001b[34mINFO:sagemaker-containers:No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34mINFO:sagemaker_xgboost_container.training:Running XGBoost Sagemaker in algorithm mode\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[14:09:53] 227x13 matrix with 2951 entries loaded from /opt/ml/input/data/train?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Determined delimiter of CSV input is ','\u001b[0m\n\u001b[34m[14:09:53] 112x13 matrix with 1456 entries loaded from /opt/ml/input/data/validation?format=csv&label_column=0&delimiter=,\u001b[0m\n\u001b[34mINFO:root:Single node training.\u001b[0m\n\u001b[34mINFO:root:Train matrix has 227 rows\u001b[0m\n\u001b[34mINFO:root:Validation matrix has 112 rows\u001b[0m\n\u001b[34m[0]#011train-rmse:18.994#011validation-rmse:20.2783\u001b[0m\n\u001b[34m[1]#011train-rmse:15.5339#011validation-rmse:16.5344\u001b[0m\n\u001b[34m[2]#011train-rmse:12.7513#011validation-rmse:13.4411\u001b[0m\n\u001b[34m[3]#011train-rmse:10.5834#011validation-rmse:11.2934\u001b[0m\n\u001b[34m[4]#011train-rmse:8.84639#011validation-rmse:9.67482\u001b[0m\n\u001b[34m[5]#011train-rmse:7.39023#011validation-rmse:8.10357\u001b[0m\n\u001b[34m[6]#011train-rmse:6.30785#011validation-rmse:7.03037\u001b[0m\n\u001b[34m[7]#011train-rmse:5.41246#011validation-rmse:6.23572\u001b[0m\n\u001b[34m[8]#011train-rmse:4.71797#011validation-rmse:5.65275\u001b[0m\n\u001b[34m[9]#011train-rmse:4.13881#011validation-rmse:5.24869\u001b[0m\n\u001b[34m[10]#011train-rmse:3.64118#011validation-rmse:4.93444\u001b[0m\n\u001b[34m[11]#011train-rmse:3.31143#011validation-rmse:4.68858\u001b[0m\n\u001b[34m[12]#011train-rmse:3.02537#011validation-rmse:4.5044\u001b[0m\n\u001b[34m[13]#011train-rmse:2.84681#011validation-rmse:4.41538\u001b[0m\n\u001b[34m[14]#011train-rmse:2.67526#011validation-rmse:4.34334\u001b[0m\n\u001b[34m[15]#011train-rmse:2.56096#011validation-rmse:4.30721\u001b[0m\n\u001b[34m[16]#011train-rmse:2.46261#011validation-rmse:4.27764\u001b[0m\n\u001b[34m[17]#011train-rmse:2.36842#011validation-rmse:4.27507\u001b[0m\n\u001b[34m[18]#011train-rmse:2.30905#011validation-rmse:4.28478\u001b[0m\n\u001b[34m[19]#011train-rmse:2.23369#011validation-rmse:4.22795\u001b[0m\n\u001b[34m[20]#011train-rmse:2.17197#011validation-rmse:4.21938\u001b[0m\n\u001b[34m[21]#011train-rmse:2.16045#011validation-rmse:4.20019\u001b[0m\n\u001b[34m[22]#011train-rmse:2.11134#011validation-rmse:4.17136\u001b[0m\n\u001b[34m[23]#011train-rmse:2.07182#011validation-rmse:4.16619\u001b[0m\n\u001b[34m[24]#011train-rmse:2.03219#011validation-rmse:4.17941\u001b[0m\n\u001b[34m[25]#011train-rmse:1.999#011validation-rmse:4.18589\u001b[0m\n\u001b[34m[26]#011train-rmse:1.93638#011validation-rmse:4.14863\u001b[0m\n\u001b[34m[27]#011train-rmse:1.90945#011validation-rmse:4.14727\u001b[0m\n\u001b[34m[28]#011train-r
mse:1.86484#011validation-rmse:4.09432\u001b[0m\n\u001b[34m[29]#011train-rmse:1.84039#011validation-rmse:4.07063\u001b[0m\n\u001b[34m[30]#011train-rmse:1.82151#011validation-rmse:4.05619\u001b[0m\n\u001b[34m[31]#011train-rmse:1.77496#011validation-rmse:4.03818\u001b[0m\n\u001b[34m[32]#011train-rmse:1.73339#011validation-rmse:4.03474\u001b[0m\n\u001b[34m[33]#011train-rmse:1.69617#011validation-rmse:4.04682\u001b[0m\n\u001b[34m[34]#011train-rmse:1.67166#011validation-rmse:4.04437\u001b[0m\n\u001b[34m[35]#011train-rmse:1.64386#011validation-rmse:4.01851\u001b[0m\n\u001b[34m[36]#011train-rmse:1.61165#011validation-rmse:4.01618\u001b[0m\n\u001b[34m[37]#011train-rmse:1.546#011validation-rmse:4.03394\u001b[0m\n\u001b[34m[38]#011train-rmse:1.50761#011validation-rmse:4.05178\u001b[0m\n\u001b[34m[39]#011train-rmse:1.47994#011validation-rmse:4.0451\u001b[0m\n\u001b[34m[40]#011train-rmse:1.45661#011validation-rmse:4.04327\u001b[0m\n\u001b[34m[41]#011train-rmse:1.43855#011validation-rmse:4.02355\u001b[0m\n\u001b[34m[42]#011train-rmse:1.42143#011validation-rmse:4.02356\u001b[0m\n\u001b[34m[43]#011train-rmse:1.40277#011validation-rmse:4.01517\u001b[0m\n\u001b[34m[44]#011train-rmse:1.38596#011validation-rmse:4.01575\u001b[0m\n\u001b[34m[45]#011train-rmse:1.36621#011validation-rmse:4.00015\u001b[0m\n\u001b[34m[46]#011train-rmse:1.33122#011validation-rmse:4.00134\u001b[0m\n\u001b[34m[47]#011train-rmse:1.30739#011validation-rmse:4.00649\u001b[0m\n\u001b[34m[48]#011train-rmse:1.29342#011validation-rmse:4.02371\u001b[0m\n\u001b[34m[49]#011train-rmse:1.282#011validation-rmse:4.01358\u001b[0m\n\u001b[34m[50]#011train-rmse:1.26578#011validation-rmse:4.00823\u001b[0m\n\u001b[34m[51]#011train-rmse:1.23044#011validation-rmse:4.02941\u001b[0m\n\u001b[34m[52]#011train-rmse:1.2078#011validation-rmse:4.05237\u001b[0m\n\u001b[34m[53]#011train-rmse:1.19128#011validation-rmse:4.0545\u001b[0m\n\u001b[34m[54]#011train-rmse:1.18363#011validation-rmse:4.0507\u001b[0m\n\u001b[34m[55]#011train-rmse:1.16402#011validation-rmse:4.04508\u001b[0m\n\n2020-06-24 14:10:04 Uploading - Uploading generated training model\n2020-06-24 14:10:04 Completed - Training job completed\nTraining seconds: 54\nBillable seconds: 54\n"
]
],
[
[
"### Build the model\n\nNow that the training job has completed, we have some model artifacts which we can use to build a model. Note that here we mean SageMaker's definition of a model, which is a collection of information about a specific algorithm along with the artifacts which result from a training job.",
"_____no_output_____"
]
],
[
[
"# We begin by asking SageMaker to describe for us the results of the training job. The data structure\n# returned contains a lot more information than we currently need, try checking it out yourself in\n# more detail.\ntraining_job_info = session.sagemaker_client.describe_training_job(TrainingJobName=training_job_name)\n\nmodel_artifacts = training_job_info['ModelArtifacts']['S3ModelArtifacts']",
"_____no_output_____"
],
[
"# Just like when we created a training job, the model name must be unique\nmodel_name = training_job_name + \"-model\"\n\n# We also need to tell SageMaker which container should be used for inference and where it should\n# retrieve the model artifacts from. In our case, the xgboost container that we used for training\n# can also be used for inference.\nprimary_container = {\n \"Image\": container,\n \"ModelDataUrl\": model_artifacts\n}\n\n# And lastly we construct the SageMaker model\nmodel_info = session.sagemaker_client.create_model(\n ModelName = model_name,\n ExecutionRoleArn = role,\n PrimaryContainer = primary_container)",
"_____no_output_____"
]
],
[
[
"## Step 5: Test the trained model\n\nWe will be skipping this step for now. We will still test our trained model but we are going to do it by using the deployed model, rather than setting up a batch transform job.\n\n## Step 6: Create and deploy the endpoint\n\nNow that we have trained and constructed a model it is time to build the associated endpoint and deploy it. As in the earlier steps, we first need to construct the appropriate configuration.",
"_____no_output_____"
]
],
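[
[
"# Editor's sketch, not part of the original notebook: for reference, Step 5 could be\n# done with a batch transform job instead of the live endpoint built below. This cell\n# is an untested outline; the job name and the S3 output prefix are illustrative\n# choices, and we first save and upload the test set, mirroring train/validation.\nX_test.to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)\ntest_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)\n\ntransform_params = {\n    'TransformJobName': training_job_name + '-xform',\n    'ModelName': model_name,\n    'TransformInput': {\n        'DataSource': {'S3DataSource': {'S3DataType': 'S3Prefix', 'S3Uri': test_location}},\n        'ContentType': 'text/csv',\n        'SplitType': 'Line'\n    },\n    'TransformOutput': {'S3OutputPath': 's3://' + session.default_bucket() + '/' + prefix + '/batch-output'},\n    'TransformResources': {'InstanceType': 'ml.m4.xlarge', 'InstanceCount': 1}\n}\n# session.sagemaker_client.create_transform_job(**transform_params)  # uncomment to actually run the job",
"_____no_output_____"
]
],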
[
[
"# As before, we need to give our endpoint configuration a name which should be unique\nendpoint_config_name = \"boston-xgboost-endpoint-config-\" + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n\n# And then we ask SageMaker to construct the endpoint configuration\nendpoint_config_info = session.sagemaker_client.create_endpoint_config(\n EndpointConfigName = endpoint_config_name,\n ProductionVariants = [{\n \"InstanceType\": \"ml.m4.xlarge\",\n \"InitialVariantWeight\": 1,\n \"InitialInstanceCount\": 1,\n \"ModelName\": model_name,\n \"VariantName\": \"AllTraffic\"\n }])",
"_____no_output_____"
]
],
[
[
"And now that the endpoint configuration has been created we can deploy the endpoint itself.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words **If you are no longer using a deployed endpoint, shut it down!**",
"_____no_output_____"
]
],
[
[
"# Again, we need a unique name for our endpoint\nendpoint_name = \"boston-xgboost-endpoint-\" + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n\n# And then we can deploy our endpoint\nendpoint_info = session.sagemaker_client.create_endpoint(\n EndpointName = endpoint_name,\n EndpointConfigName = endpoint_config_name)",
"_____no_output_____"
]
],
[
[
"Just like when we created a training job, SageMaker is now requisitioning and launching our endpoint. Since we can't do much until the endpoint has been completely deployed we can wait for it to finish.",
"_____no_output_____"
]
],
[
[
"endpoint_dec = session.wait_for_endpoint(endpoint_name)",
"---------------!"
]
],
[
[
"## Step 7: Use the model\n\nNow that our model is trained and deployed we can send test data to it and evaluate the results. Here, because our test data is so small, we can send it all using a single call to our endpoint. If our test dataset was larger we would need to split it up and send the data in chunks, making sure to accumulate the results.",
"_____no_output_____"
]
],
[
[
"# First we need to serialize the input data. In this case we want to send the test data as a csv and\n# so we manually do this. Of course, there are many other ways to do this.\npayload = [[str(entry) for entry in row] for row in X_test.values]\npayload = '\\n'.join([','.join(row) for row in payload])",
"_____no_output_____"
],
[
"# This time we use the sagemaker runtime client rather than the sagemaker client so that we can invoke\n# the endpoint that we created.\nresponse = session.sagemaker_runtime_client.invoke_endpoint(\n EndpointName = endpoint_name,\n ContentType = 'text/csv',\n Body = payload)\n\n# We need to make sure that we deserialize the result of our endpoint call.\nresult = response['Body'].read().decode(\"utf-8\")\nY_pred = np.fromstring(result, sep=',')",
"_____no_output_____"
]
],
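[
[
"# Editor's sketch, not part of the original notebook: as noted above, a larger test\n# set would have to be sent in chunks, accumulating the predictions. An outline of\n# that loop; chunk_size = 100 is an illustrative value, not a SageMaker limit.\nchunk_size = 100\npreds = []\nfor start in range(0, len(X_test), chunk_size):\n    rows = X_test.values[start:start + chunk_size]\n    body = '\\n'.join(','.join(str(entry) for entry in row) for row in rows)\n    resp = session.sagemaker_runtime_client.invoke_endpoint(\n        EndpointName=endpoint_name,\n        ContentType='text/csv',\n        Body=body)\n    preds.append(np.fromstring(resp['Body'].read().decode('utf-8'), sep=','))\nY_pred_chunked = np.concatenate(preds)",
"_____no_output_____"
]
],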
[
[
"To see how well our model works we can create a simple scatter plot between the predicted and actual values. If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement.",
"_____no_output_____"
]
],
[
[
"plt.scatter(Y_test, Y_pred)\nplt.xlabel(\"Median Price\")\nplt.ylabel(\"Predicted Price\")\nplt.title(\"Median Price vs Predicted Price\")",
"_____no_output_____"
]
],
[
[
"## Delete the endpoint\n\nSince we are no longer using the deployed model we need to make sure to shut it down. Remember that you have to pay for the length of time that your endpoint is deployed so the longer it is left running, the more it costs.",
"_____no_output_____"
]
],
[
[
"session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name)",
"_____no_output_____"
]
],
[
[
"## Optional: Clean up\n\nThe default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.",
"_____no_output_____"
]
],
[
[
"# First we will remove all of the files contained in the data_dir directory\n#!rm $data_dir/*\n\n# And then we delete the directory itself\n#!rmdir $data_dir",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d4af816e2811419d31c21a19d56eee48cf9689 | 190,933 | ipynb | Jupyter Notebook | Chapter 3: MNIST/Classification.ipynb | prachuryanath/machinelearning | 059b47815c286bf34ddad8ca7010f23c5cb18be7 | [
"MIT"
] | null | null | null | Chapter 3: MNIST/Classification.ipynb | prachuryanath/machinelearning | 059b47815c286bf34ddad8ca7010f23c5cb18be7 | [
"MIT"
] | null | null | null | Chapter 3: MNIST/Classification.ipynb | prachuryanath/machinelearning | 059b47815c286bf34ddad8ca7010f23c5cb18be7 | [
"MIT"
] | null | null | null | 255.942359 | 46,613 | 0.743198 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"from mnist import MNIST\nmnist= MNIST('mnist/')\nX_train,y_train=mnist.load_training()\nX_test,y_test=mnist.load_testing()",
"_____no_output_____"
],
[
"X_train=np.asarray(X_train).astype(np.float32)\ny_train=np.asarray(y_train).astype(np.float32)\nX_test=np.asarray(X_test).astype(np.float32)\ny_test=np.asarray(y_test).astype(np.float32)",
"_____no_output_____"
],
[
"some_digit = X_train[6]\nsome_digit_image = some_digit.reshape(28, 28)\nplt.imshow(some_digit_image, cmap = 'Greys', interpolation=\"nearest\")\nplt.axis(\"off\")\nplt.show()",
"_____no_output_____"
],
[
"y_train_5 = (y_train == 5) # True for all 5s, False for all other digits.\ny_test_5 = (y_test == 5)",
"_____no_output_____"
],
[
"from sklearn.linear_model import SGDClassifier\nsgd_clf = SGDClassifier(random_state=42)\nsgd_clf.fit(X_train, y_train_5)",
"_____no_output_____"
],
[
"sgd_clf.predict([X_train[6]])",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score\ncross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\")",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_predict\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix\nconfusion_matrix(y_train_5, y_train_pred)",
"_____no_output_____"
],
[
"from sklearn.metrics import precision_score, recall_score\nprecision_score(y_train_5, y_train_pred) # 3530/(687+3530)",
"_____no_output_____"
],
[
"recall_score(y_train_5, y_train_pred) #3530/(3530+1891)",
"_____no_output_____"
],
[
"from sklearn.metrics import f1_score\nf1_score(y_train_5, y_train_pred)",
"_____no_output_____"
],
[
"y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,method=\"decision_function\")",
"_____no_output_____"
],
[
"from sklearn.metrics import precision_recall_curve\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)",
"_____no_output_____"
],
[
"def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\n [...] # highlight the threshold, add the legend, axis label and grid\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\nplt.show()",
"_____no_output_____"
],
[
"threshold_90_precision = thresholds[np.argmax(precisions >= 0.91)]\nprint(threshold_90_precision)",
"4146.346368451577\n"
],
[
"y_train_pred_90 = (y_scores >= 4150)",
"_____no_output_____"
],
[
" precision_score(y_train_5, y_train_pred_90)",
"_____no_output_____"
],
[
"recall_score(y_train_5, y_train_pred_90)",
"_____no_output_____"
],
[
"from sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(y_train_5, y_scores)",
"_____no_output_____"
],
[
"def plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], 'k--') # dashed diagonal\n [...] # Add axis labels and grid\nplot_roc_curve(fpr, tpr)\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import roc_auc_score\nroc_auc_score(y_train_5, y_scores)",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestClassifier\nforest_clf = RandomForestClassifier(random_state=42)\ny_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,method=\"predict_proba\")",
"_____no_output_____"
],
[
"y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class\nfpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest)",
"_____no_output_____"
],
[
"plt.plot(fpr, tpr, \"b:\", label=\"SGD\")\nplot_roc_curve(fpr_forest, tpr_forest, \"Random Forest\")\nplt.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
],
[
" roc_auc_score(y_train_5, y_scores_forest)",
"_____no_output_____"
],
[
"sgd_clf.fit(X_train, y_train) # y_train, not y_train_5\nsgd_clf.predict([some_digit])",
"_____no_output_____"
],
[
"some_digit_scores = sgd_clf.decision_function([some_digit])\nsome_digit_scores",
"_____no_output_____"
],
[
"from sklearn.multiclass import OneVsOneClassifier\novo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))\novo_clf.fit(X_train, y_train)\novo_clf.predict([some_digit])",
"_____no_output_____"
],
[
"len(ovo_clf.estimators_)",
"_____no_output_____"
],
[
"forest_clf.fit(X_train, y_train)\nforest_clf.predict([some_digit])",
"_____no_output_____"
],
[
"forest_clf.predict_proba([some_digit])",
"_____no_output_____"
],
[
" cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\")",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\ncross_val_score(sgd_clf, X_train_scaled, y_train, cv=3,scoring=\"accuracy\")",
"_____no_output_____"
],
[
"y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nconf_mx",
"_____no_output_____"
],
[
"plt.matshow(conf_mx, cmap=plt.cm.gray)\nplt.show()",
"_____no_output_____"
],
[
"row_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums",
"_____no_output_____"
],
[
"np.fill_diagonal(norm_conf_mx, 0)\nplt.matshow(norm_conf_mx, cmap=plt.cm.gray)\nplt.show()",
"_____no_output_____"
],
[
"cl_a, cl_b = 3, 5\nX_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]\nX_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]\nX_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]\nX_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]\nplt.figure(figsize=(8,8))\nplt.subplot(221); plot_digits(X_aa[:25], images_per_row=5)\nplt.subplot(222); plot_digits(X_ab[:25], images_per_row=5)\nplt.subplot(223); plot_digits(X_ba[:25], images_per_row=5)\nplt.subplot(224); plot_digits(X_bb[:25], images_per_row=5)\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsClassifier\ny_train_large = (y_train >= 7)\ny_train_odd = (y_train % 2 == 1)\ny_multilabel = np.c_[y_train_large, y_train_odd]\nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train, y_multilabel)",
"_____no_output_____"
],
[
" knn_clf.predict([some_digit])",
"_____no_output_____"
],
[
"y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3)\nf1_score(y_multilabel, y_train_knn_pred, average=\"macro\")",
"_____no_output_____"
],
[
"noise = np.random.randint(0, 100, (len(X_train), 784))\nX_train_mod = X_train + noise\nnoise = np.random.randint(0, 100, (len(X_test), 784))\nX_test_mod = X_test + noise\ny_train_mod = X_train\ny_test_mod = X_test",
"_____no_output_____"
],
[
"knn_clf.fit(X_train_mod, y_train_mod)\nclean_digit = knn_clf.predict([X_test_mod[some_index]])\nplot_digit(clean_digit)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d4b116cceef3bcd1fae78ac7f3c15eec0ebfb3 | 632,475 | ipynb | Jupyter Notebook | 2021-fall-part-1/seminars/13_em_algo/13_em_algo_practice.ipynb | bagrorg/ml-course | 9a2aa7379ea0dee6968eef3a4ae5926e83c391ca | [
"MIT"
] | 4 | 2021-09-16T07:03:16.000Z | 2021-12-13T10:33:51.000Z | 2021-fall-part-1/seminars/13_em_algo/13_em_algo_practice.ipynb | bagrorg/ml-course | 9a2aa7379ea0dee6968eef3a4ae5926e83c391ca | [
"MIT"
] | null | null | null | 2021-fall-part-1/seminars/13_em_algo/13_em_algo_practice.ipynb | bagrorg/ml-course | 9a2aa7379ea0dee6968eef3a4ae5926e83c391ca | [
"MIT"
] | 13 | 2021-09-02T07:29:24.000Z | 2021-12-13T15:26:00.000Z | 612.863372 | 358,160 | 0.943418 | [
[
[
"<center><img src=\"./images/logo_fmkn.png\" width=300 style=\"display: inline-block;\"></center> \n\n## Машинное обучение\n### Семинар 13. ЕМ-алгоритм\n\n<br />\n<br />\n9 декабря 2021",
"_____no_output_____"
],
[
"Будем решать задачу восставновления картинки лица по набору зашумленных картинок (взято с курса deep bayes 2018 https://github.com/bayesgroup/deepbayes-2018).\n\nУ вас есть $K$ фотографий, поврежденных электромагнитным шумом. Известно, что на каждом фото есть лицо в неизвестно где начинающейся прямоугольной области ширины $w$ и фон, одинаковый для всех фотографий.\n",
"_____no_output_____"
],
[
"<center><img src=\"./images/example_and_structure.jpg\" width=800 style=\"display: inline-block;\"></center> \n",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"import zipfile\nwith zipfile.ZipFile('data_em.zip', 'r') as zip_ref:\n zip_ref.extractall('.')",
"_____no_output_____"
],
[
"DATA_FILE = \"data_em\"\nw = 73 # face_width",
"_____no_output_____"
],
[
"X = np.load(DATA_FILE)",
"_____no_output_____"
],
[
"X.shape # H, W, K",
"_____no_output_____"
],
[
"plt.imshow(X[:, :, 7], cmap=\"Greys_r\")\nplt.axis(\"off\")",
"_____no_output_____"
],
[
"tH, tW, tw, tK = 2, 3, 1, 2\ntX = np.arange(tH*tW*tK).reshape(tH, tW, tK)\ntF = np.arange(tH*tw).reshape(tH, tw)\ntB = np.arange(tH*tW).reshape(tH, tW)\nts = 0.1\nta = np.arange(1, (tW-tw+1)+1)\nta = ta / ta.sum()\ntq = np.arange(1, (tW-tw+1)*tK+1).reshape(tW-tw+1, tK)\ntq = tq / tq.sum(axis=0)[np.newaxis, :]",
"_____no_output_____"
]
],
[
[
"1. **Реализуйте calculate_log_probability**\n\nДля $k$-й картини $X_k$ и некоторой позиции $d_k$: \n $$ p(X_k \\mid d_k,\\,F,\\,B,\\, std) = \\prod\\limits_{ij}\\begin{cases} \n \\mathcal{N}(X_k[i,j]\\mid F[i,\\,j-d_k],\\, std^2), \n & \\text{if}\\, (i,j)\\in faceArea(d_k)\\\\\n \\mathcal{N}(X_k[i,j]\\mid B[i,j],\\, std^2), & \\text{else}\n \\end{cases}\n $$\nЗамечания:\n* $faceArea(d_k) = \\{[i, j]| d_k \\leq j \\leq d_k + w - 1 \\}$\n* Априорное распределение задаётся обучаемым вектором $a \\in \\mathbb{R}^{W-w+1}$: $$p(d_k \\mid a) = a[d_k],\\ \\sum\\limits_j a[j] = 1$$\n* Итоговая вероятностная модель: $$ p(X, d \\mid F,\\,B,\\,std,\\,a) = \\prod\\limits_k p(X_k \\mid d_k,\\,F,\\,B,\\,std) p(d_k \\mid a)$$\n* Не забудьте про логарифм!\n* `scipy.stats.norm` может вам пригодиться",
"_____no_output_____"
]
],
[
[
"import scipy.stats",
"_____no_output_____"
],
[
"def calculate_log_probability(X, F, B, s):\n \"\"\"\n Calculates log p(X_k|d_k, F, B, s) for all images X_k in X and\n all possible face position d_k.\n\n Parameters\n ----------\n X : array, shape (H, W, K)\n K images of size H x W.\n F : array, shape (H, w)\n Estimate of prankster's face.\n B : array, shape (H, W)\n Estimate of background.\n s : float\n Estimate of standard deviation of Gaussian noise.\n\n Returns\n -------\n ll : array, shape(W-w+1, K)\n ll[dw, k] - log-likelihood of observing image X_k given\n that the prankster's face F is located at position dw\n \"\"\"\n H, W, K = X.shape\n _, w = F.shape\n # your code here\n ll = np.zeros((W-w+1, K))\n for dw in range(W-w+1):\n combined = np.copy(B)\n combined[:, dw:dw+w] = F\n d_combined = X - np.expand_dims(combined, 2)\n ll[dw] = scipy.stats.norm(0, s).logpdf(d_combined).sum(axis=(0,1))\n return ll",
"_____no_output_____"
],
[
"# run this cell to test your implementation\nexpected = np.array([[-3541.69812064, -5541.69812064],\n [-4541.69812064, -6741.69812064],\n [-6141.69812064, -8541.69812064]])\nactual = calculate_log_probability(tX, tF, tB, ts)\nassert np.allclose(actual, expected)\nprint(\"OK\")",
"OK\n"
]
],
[
[
"\n2. **Реализуйте calculate_lower_bound**\n\\begin{equation}\\mathscr{L}(q, \\,F, \\,B,\\, s,\\, a) = \\sum_k \\biggl (\\mathbb{E} _ {q( d_k)}\\bigl ( \\log p( X_{k} \\mid {d}_{k} , \\,F,\\,B,\\,s) + \n \\log p( d_k \\mid a)\\bigr) - \\mathbb{E} _ {q( d_k)} \\log q( d_k)\\biggr) \\end{equation}\n\nЗамечания\n * Используйте calculate_log_probability!\n * Обратите внимание, что $q( d_k)$ и $p( d_k \\mid a)$ дискретны. Например, $P(d_k=i \\mid a) = a[i]$.",
"_____no_output_____"
]
],
[
[
"def calculate_lower_bound(X, F, B, s, a, q):\n \"\"\"\n Calculates the lower bound L(q, F, B, s, a) for \n the marginal log likelihood.\n\n Parameters\n ----------\n X : array, shape (H, W, K)\n K images of size H x W.\n F : array, shape (H, w)\n Estimate of prankster's face.\n B : array, shape (H, W)\n Estimate of background.\n s : float\n Estimate of standard deviation of Gaussian noise.\n a : array, shape (W-w+1)\n Estimate of prior on position of face in any image.\n q : array\n q[dw, k] - estimate of posterior \n of position dw\n of prankster's face given image Xk\n\n Returns\n -------\n L : float\n The lower bound L(q, F, B, s, a) \n for the marginal log likelihood.\n \"\"\"\n # your code here\n return (q * (calculate_log_probability(X,F,B,s) + np.expand_dims(np.log(a), 1) - np.log(q))).sum()",
"_____no_output_____"
],
[
"calculate_lower_bound(tX, tF, tB, ts, ta, tq)",
"_____no_output_____"
],
[
"# run this cell to test your implementation\nexpected = -12761.1875\nactual = calculate_lower_bound(tX, tF, tB, ts, ta, tq)\nassert np.allclose(actual, expected)\nprint(\"OK\")",
"OK\n"
]
],
[
[
"3. **Реализуем E шаг**\n$$q(d_k) = p(d_k \\mid X_k, \\,F, \\,B, \\,s,\\, a) = \n\\frac {p( X_{k} \\mid {d}_{k} , \\,F,\\,B,\\,s)\\, p(d_k \\mid a)}\n{\\sum_{d'_k} p( X_{k} \\mid d'_k , \\,F,\\,B,\\,s) \\,p(d'_k \\mid a)}$$\n\nЗамечания\n * Используйте calculate_log_probability!\n * Считайте в логарифмах, возведите в экспоненту в конце. \n * Рекомендется использовать следующее утверждение для выч. стабильности: $$\\beta_i = \\log{p_i(\\dots)} \\quad\\rightarrow \\quad\n \\frac{e^{\\beta_i}}{\\sum\\limits_k e^{\\beta_k}} = \n \\frac{e^{(\\beta_i - \\max_j \\beta_j)}}{\\sum\\limits_k e^{(\\beta_k- \\max_j \\beta_j)}}$$",
"_____no_output_____"
]
],
[
[
"def run_e_step(X, F, B, s, a):\n \"\"\"\n Given the current esitmate of the parameters, for each image Xk\n esitmates the probability p(d_k|X_k, F, B, s, a).\n\n Parameters\n ----------\n X : array, shape(H, W, K)\n K images of size H x W.\n F : array_like, shape(H, w)\n Estimate of prankster's face.\n B : array shape(H, W)\n Estimate of background.\n s : float\n Estimate of standard deviation of Gaussian noise.\n a : array, shape(W-w+1)\n Estimate of prior on face position in any image.\n\n Returns\n -------\n q : array\n shape (W-w+1, K)\n q[dw, k] - estimate of posterior of position dw\n of prankster's face given image Xk\n \"\"\"\n # your code here\n log_nom = calculate_log_probability(X,F,B,s) + np.expand_dims(np.log(a), 1)\n mx = log_nom.max(axis=0)\n nom = np.exp(log_nom - mx)\n return nom / nom.sum(axis=0)",
"_____no_output_____"
],
[
"run_e_step(tX, tF, tB, ts, ta)",
"_____no_output_____"
],
[
"# run this cell to test your implementation\nexpected = np.array([[ 1., 1.],\n [ 0., 0.],\n [ 0., 0.]])\nactual = run_e_step(tX, tF, tB, ts, ta)\nassert np.allclose(actual, expected)\nprint(\"OK\")",
"OK\n"
]
],
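[
[
"# Editor's illustration, not part of the seminar sheet: why the max-subtraction trick\n# from the hint matters. Naive exponentiation of large negative log-probabilities\n# underflows to 0/0, while the shifted version is exact.\nbeta = np.array([-1000., -1001., -1002.])\nwith np.errstate(invalid='ignore'):\n    naive = np.exp(beta) / np.exp(beta).sum()  # -> [nan nan nan]\nshifted = np.exp(beta - beta.max())\nprint(naive)\nprint(shifted / shifted.sum())  # -> [0.665 0.245 0.090]",
"_____no_output_____"
]
],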
[
[
"4. **Реализуйте M шаг**\n\nНадо\n\\begin{equation}\\mathscr{L}(q, \\,F, \\,B,\\, s,\\, a) = \\sum_k \\biggl (\\mathbb{E} _ {q( d_k)}\\bigl ( \\log p( X_{k} \\mid {d}_{k} , \\,F,\\,B,\\,s) + \n \\log p( d_k \\mid a)\\bigr) - \\mathbb{E} _ {q( d_k)} \\log q( d_k)\\biggr)\\rightarrow \\max\\limits_{\\theta, a} \\end{equation}\nПосле долгих вычислений получим:\n$$a[j] = \\frac{\\sum_k q( d_k = j )}{\\sum_{j'} \\sum_{k'} q( d_{k'} = j')}$$$$F[i, m] = \\frac 1 K \\sum_k \\sum_{d_k} q(d_k)\\, X^k[i,\\, m+d_k]$$\\begin{equation}B[i, j] = \\frac {\\sum_k \\sum_{ d_k:\\, (i, \\,j) \\,\\not\\in faceArea(d_k)} q(d_k)\\, X^k[i, j]} \n {\\sum_k \\sum_{d_k: \\,(i, \\,j)\\, \\not\\in faceArea(d_k)} q(d_k)}\\end{equation}\\begin{equation}s^2 = \\frac 1 {HWK} \\sum_k \\sum_{d_k} q(d_k)\n \\sum_{i,\\, j} (X^k[i, \\,j] - Model^{d_k}[i, \\,j])^2\\end{equation}\nгде $Model^{d_k}[i, j]$ картинка из фона и лица, сдвинутого на $d_k$.\n\nЗамечания\n* Обновляйте параметры в порядке: $a$, $F$, $B$, $s$.\n* Используйте обновленный параметр для оценки следующего параметра.\n",
"_____no_output_____"
]
],
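[
[
"*Editor's note (a derivation sketch, not part of the original sheet).* For the update of $a$, keep only the terms of $\\mathscr{L}$ that involve it and add a Lagrange multiplier for the constraint $\\sum_j a[j] = 1$:\n$$\\sum_k \\sum_j q(d_k = j) \\log a[j] + \\lambda \\Bigl(\\sum_j a[j] - 1\\Bigr) \\rightarrow \\max_a.$$\nSetting the derivative with respect to $a[j]$ to zero gives $a[j] \\propto \\sum_k q(d_k = j)$, and the constraint yields the normalized update above. The updates for $F$, $B$ and $s^2$ follow the same pattern: differentiate the expected complete-data log-likelihood and solve in closed form.",
"_____no_output_____"
]
],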
[
[
"def run_m_step(X, q, w):\n \"\"\"\n Estimates F, B, s, a given esitmate of posteriors defined by q.\n\n Parameters\n ----------\n X : array, shape (H, W, K)\n K images of size H x W.\n q :\n q[dw, k] - estimate of posterior of position dw\n of prankster's face given image Xk\n w : int\n Face mask width.\n\n Returns\n -------\n F : array, shape (H, w)\n Estimate of prankster's face.\n B : array, shape (H, W)\n Estimate of background.\n s : float\n Estimate of standard deviation of Gaussian noise.\n a : array, shape (W-w+1)\n Estimate of prior on position of face in any image.\n \"\"\"\n# your code here\n H, W, K = X.shape\n dw, _ = q.shape\n w = W - dw + 1\n\n a = q.sum(axis=1)/q.sum()\n \n F = np.zeros((H, w))\n for dk in range(dw):\n F += (q[dk] * X[:, dk:dk+w]).sum(axis=2) / K\n \n B = np.zeros((H, W))\n denom = np.zeros((H, W))\n for dk in range(dw):\n if dk > 0:\n denom[:, :dk] += q[dk].sum()\n B[:, :dk] += (q[dk] * X[:, :dk]).sum(axis=2)\n if dk + w < W:\n B[:, dk+w:] += (q[dk] * X[:, dk+w:]).sum(axis=2)\n denom[:, dk + w:] += q[dk].sum()\n B /= denom\n \n s2 = 0\n for dk in range(dw):\n model = np.copy(B)\n model[:, dk:dk+w] = F\n s2 += (q[dk] * ((X - np.expand_dims(model,2)) ** 2)).sum()\n s2 /= H * W * K\n \n return F, B, np.sqrt(s2), a\n",
"_____no_output_____"
],
[
"run_m_step(tX, tq, tw)",
"_____no_output_____"
],
[
"# run this cell to test your implementation\nexpected = [np.array([[ 3.27777778],\n [ 9.27777778]]),\n np.array([[ 0.48387097, 2.5 , 4.52941176],\n [ 6.48387097, 8.5 , 10.52941176]]),\n 0.94868,\n np.array([ 0.13888889, 0.33333333, 0.52777778])]\nactual = run_m_step(tX, tq, tw)\nfor a, e in zip(actual, expected):\n assert np.allclose(a, e)\nprint(\"OK\")",
"OK\n"
]
],
[
[
"5. **Реализуйте EM алгоритм**",
"_____no_output_____"
]
],
[
[
"def run_EM(X, w, F=None, B=None, s=None, a=None, tolerance=0.001,\n max_iter=50):\n \"\"\"\n Runs EM loop until the likelihood of observing X given current\n estimate of parameters is idempotent as defined by a fixed\n tolerance.\n\n Parameters\n ----------\n X : array, shape (H, W, K)\n K images of size H x W.\n w : int\n Face mask width.\n F : array, shape (H, w), optional\n Initial estimate of prankster's face.\n B : array, shape (H, W), optional\n Initial estimate of background.\n s : float, optional\n Initial estimate of standard deviation of Gaussian noise.\n a : array, shape (W-w+1), optional\n Initial estimate of prior on position of face in any image.\n tolerance : float, optional\n Parameter for stopping criterion.\n max_iter : int, optional\n Maximum number of iterations.\n\n Returns\n -------\n F, B, s, a : trained parameters.\n \"\"\"\n # your code here\n H, W, N = X.shape\n if F is None:\n F = np.random.randint(0, 255, (H, w))\n if B is None:\n B = np.random.randint(0, 255, (H, W))\n if a is None:\n a = np.ones(W - w + 1)\n a /= np.sum(a)\n if s is None:\n s = np.random.rand()*64*64\n l_prev = -np.inf\n for it in range(max_iter):\n print(f\"iteration = {it}\")\n q = run_e_step(X, F, B, s, a)\n print(\"e\")\n F, B, s, a = run_m_step(X, q, w)\n print(\"m\")\n print(s)\n if it == max_iter - 1:\n print(\"no convergence\")\n break\n l_cur = calculate_lower_bound(X, F, B, s, a, q)\n if l_cur - l_prev < tolerance:\n print(f\"converged in {it} iterations {l_cur - l_prev}\")\n break\n else:\n l_prev = l_cur\n \n return F, B, s, a",
"_____no_output_____"
]
],
[
[
"Расшифровываем картинку:",
"_____no_output_____"
]
],
[
[
"def show(F, i=1, n=1):\n \"\"\"\n shows face F at subplot i out of n\n \"\"\"\n plt.subplot(1, n, i)\n plt.imshow(F, cmap=\"Greys_r\")\n plt.axis(\"off\")",
"_____no_output_____"
],
[
"%%time\nF, B, s, a = [None] * 4\nlens = [50, 100, 300, 500, 1000]\niters = [5, 1, 1, 1, 1]\nplt.figure(figsize=(20, 5))\nfor i, (l, it) in enumerate(zip(lens, iters)):\n F, B, s, a = run_EM(X[:, :, :l], w, F, B, s, a, max_iter=it)\n print(s)\n show(F, i+1, 5)",
"iteration = 0\ne\nm\n114.93555180113619\niteration = 1\ne\nm\n113.77686133696521\niteration = 2\ne\nm\n113.43816994843172\n"
]
],
[
[
"И фон:",
"_____no_output_____"
]
],
[
[
"show(B)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d4b75ada4e54500f67af418d0b157e85048da3 | 153,112 | ipynb | Jupyter Notebook | notebooks/2017-07-25-HFRadar_currents.ipynb | kellydesent/notebooks_demos | 8e18371f223872731fde437651e21a66f79a78e2 | [
"MIT"
] | null | null | null | notebooks/2017-07-25-HFRadar_currents.ipynb | kellydesent/notebooks_demos | 8e18371f223872731fde437651e21a66f79a78e2 | [
"MIT"
] | null | null | null | notebooks/2017-07-25-HFRadar_currents.ipynb | kellydesent/notebooks_demos | 8e18371f223872731fde437651e21a66f79a78e2 | [
"MIT"
] | null | null | null | 353.60739 | 93,816 | 0.929947 | [
[
[
"# Near real-time HF-Radar currents in the proximity of the Deepwater Horizon site",
"_____no_output_____"
],
[
"The explosion on the Deepwater Horizon (DWH) tragically killed 11 people, and resulted in one of the largest marine oil spills in history. One of the first questions when there is such a tragedy is: where will the oil go?\n\nIn order the help answer that question one can use Near real time currents from the HF-Radar sites near the incident.\n\nFirst let's start with the [HF-Radar DAC](http://cordc.ucsd.edu/projects/mapping/maps/), where one can browser the all available data interactively. Below we show an IFrame with the area near DWH for the 27 of July of 2017.\n\nIn this notebook we will demonstrate how to obtain such data programmatically.\n\n(For more information on the DWH see [http://response.restoration.noaa.gov/oil-and-chemical-spills/significant-incidents/deepwater-horizon-oil-spill](http://response.restoration.noaa.gov/oil-and-chemical-spills/significant-incidents/deepwater-horizon-oil-spill).)",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\n\nurl = (\n 'https://cordc.ucsd.edu/projects/mapping/maps/fullpage.php?'\n 'll=29.061888,-87.373643&'\n 'zm=7&'\n 'mt=&'\n 'rng=0.00,50.00&'\n 'us=1&'\n 'cs=4&'\n 'res=6km_h&'\n 'ol=3&'\n 'cp=1'\n)\n\n\niframe = '<iframe src=\"{src}\" width=\"750\" height=\"450\" style=\"border:none;\"></iframe>'.format\n\nHTML(iframe(src=url))",
"_____no_output_____"
]
],
[
[
"The interactive interface is handy for exploration but we usually need to download \"mechanically\" in order to use them in our analysis, plots, or for downloading time-series.\n\nOne way to achieve that is to use an OPeNDAP client, here Python's `xarray`, and explore the endpoint directly. \n\n(We'll use the same 6 km resolution from the IFrame above.)",
"_____no_output_____"
]
],
[
[
"import xarray as xr\n\n\nurl = (\n 'http://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USEGC/6km/hourly/RTV/'\n 'HFRADAR_US_East_and_Gulf_Coast_6km_Resolution_Hourly_RTV_best.ncd'\n)\n\nds = xr.open_dataset(url)\nds",
"_____no_output_____"
]
],
[
[
"How about extracting a week time-series from the dataset averaged around the area of interest?",
"_____no_output_____"
]
],
[
[
"dx = dy = 2.25 # Area around the point of interest.\ncenter = -87.373643, 29.061888 # Point of interest.\n\ndsw = ds.sel(time=slice('2017-07-20', '2017-07-27'))",
"_____no_output_____"
],
[
"dsw = dsw.sel(\n lon=(dsw.lon < center[0]+dx) & (dsw.lon > center[0]-dx),\n lat=(dsw.lat < center[1]+dy) & (dsw.lat > center[1]-dy),\n)",
"_____no_output_____"
]
],
[
[
"With `xarray` we can average hourly (`resample`) the whole dataset with one method call.",
"_____no_output_____"
]
],
[
[
"dsw = dsw.resample(freq='1H', dim='time', how='mean')",
"/home/filipe/miniconda3/envs/IOOS/lib/python3.6/site-packages/ipykernel/__main__.py:1: DeprecationWarning: \n.resample() has been modified to defer calculations. Instead of passing 'dim' and 'how=\"mean\", instead consider using .resample(time=\"1H\").mean() \n if __name__ == '__main__':\n"
]
],
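[
[
"The `DeprecationWarning` above comes from the older `resample` semantics. On recent `xarray` versions the equivalent, non-deprecated call (as the warning itself suggests) would be the sketch below:",
"_____no_output_____"
]
],
[
[
"# Non-deprecated form on newer xarray (sketch; same hourly mean).\n# Left commented out so the cell above remains the executed version.\n# dsw = dsw.resample(time='1H').mean()",
"_____no_output_____"
]
],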
[
[
"Now all we have to do is mask the missing data with `NaN`s and average over the area.",
"_____no_output_____"
]
],
[
[
"import numpy.ma as ma\n\nv = dsw['v'].data\nu = dsw['u'].data\ntime = dsw['time'].to_index().to_pydatetime()\n\nu = ma.masked_invalid(u)\nv = ma.masked_invalid(v)",
"_____no_output_____"
],
[
"i, j, k = u.shape\n\nu = u.reshape(i, j*k).mean(axis=1)\nv = v.reshape(i, j*k).mean(axis=1)",
"_____no_output_____"
],
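[
"# Equivalent shortcut (sketch): np.nanmean can collapse both spatial\n# axes at once, so the explicit mask + reshape above is not required.\n# u_alt/v_alt are illustrative names, not used by the cells below.\nu_alt = np.nanmean(dsw['u'].data, axis=(1, 2))\nv_alt = np.nanmean(dsw['v'].data, axis=(1, 2))",
"_____no_output_____"
],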
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom oceans import stick_plot\n\nfig, ax = plt.subplots(figsize=(11, 2.75))\n\nq = stick_plot(time, u, v, ax=ax)\n\nref = 0.5\nqk = plt.quiverkey(q, 0.1, 0.85, ref,\n '{} {}'.format(ref, ds['u'].units),\n labelpos='N', coordinates='axes')\n\n_ = plt.xticks(rotation=70)",
"_____no_output_____"
]
],
[
[
"To close this post let's us reproduce the HF radar DAC image from above but using yesterday's data.",
"_____no_output_____"
]
],
[
[
"from datetime import date, timedelta\n\n\nyesterday = date.today() - timedelta(days=1)\n\ndsy = ds.sel(time=yesterday)",
"_____no_output_____"
]
],
[
[
"Now that we singled out the date and and time we want the data, we trigger the download by accessing the data with `xarray`'s `.data` property.",
"_____no_output_____"
]
],
[
[
"u = dsy['u'].data\nv = dsy['v'].data\n\nlon = dsy.coords['lon'].data\nlat = dsy.coords['lat'].data\ntime = dsy.coords['time'].data",
"_____no_output_____"
]
],
[
[
"The cell below computes the speed from the velocity. We can use the speed computation to color code the vectors. Note that we re-create the vector velocity preserving the direction but using intensity of `1`. (The same visualization technique used in the HF radar DAC.)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom oceans import uv2spdir, spdir2uv\n\n\nangle, speed = uv2spdir(u, v)\nus, vs = spdir2uv(np.ones_like(speed), angle, deg=True)",
"_____no_output_____"
]
],
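[
[
"For reference, the two `oceans` helpers boil down to a polar decomposition of the velocity. A plain-`numpy` sketch of the same idea follows; note that the exact angle convention used by `oceans` (oceanographic vs. mathematical) may differ from this:",
"_____no_output_____"
]
],
[
[
"# Sketch of the underlying math (angle convention may differ from oceans'):\nspeed_np = np.hypot(u, v)                    # magnitude\ntheta = np.arctan2(v, u)                     # direction in radians\nus_np, vs_np = np.cos(theta), np.sin(theta)  # unit vectors with intensity 1",
"_____no_output_____"
]
],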
[
[
"Now we can create a `matplotlib` figure displaying the data.",
"_____no_output_____"
]
],
[
[
"import cartopy.crs as ccrs\n\nfrom cartopy import feature\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nLAND = feature.NaturalEarthFeature(\n 'physical', 'land', '10m',\n edgecolor='face',\n facecolor='lightgray'\n)\n\nsub = 2\nbbox = lon.min(), lon.max(), lat.min(), lat.max()\n\nfig, ax = plt.subplots(\n figsize=(9, 9),\n subplot_kw=dict(projection=ccrs.PlateCarree())\n)\n\n\nax.set_extent([center[0]-dx-dx, center[0]+dx, center[1]-dy, center[1]+dy])\nvmin, vmax = np.nanmin(speed[::sub, ::sub]), np.nanmax(speed[::sub, ::sub])\nspeed_clipped = np.clip(speed[::sub, ::sub], 0, 0.65)\nax.quiver(\n lon[::sub], lat[::sub],\n us[::sub, ::sub], vs[::sub, ::sub],\n speed_clipped, scale=30,\n)\n\n# Deepwater Horizon site.\nax.plot(-88.365997, 28.736628, marker='o', color='crimson')\ngl = ax.gridlines(draw_labels=True)\ngl.xlabels_top = gl.ylabels_right = False\ngl.xformatter = LONGITUDE_FORMATTER\ngl.yformatter = LATITUDE_FORMATTER\n\nfeature = ax.add_feature(LAND, zorder=0, edgecolor='black')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d4ccc97ce7954c611224ca70bdd12e55eee044 | 6,244 | ipynb | Jupyter Notebook | 0.2 Silhouette.ipynb | jinyihung/ATL_neurosynth | 5d9f01b20b713b668cae1b535087ba09220b75a8 | [
"MIT"
] | null | null | null | 0.2 Silhouette.ipynb | jinyihung/ATL_neurosynth | 5d9f01b20b713b668cae1b535087ba09220b75a8 | [
"MIT"
] | null | null | null | 0.2 Silhouette.ipynb | jinyihung/ATL_neurosynth | 5d9f01b20b713b668cae1b535087ba09220b75a8 | [
"MIT"
] | null | null | null | 28.381818 | 85 | 0.582639 | [
[
[
"\n## define what to cluster\n\nThis document contains part of the codes in -lfc\n\ndataset = 0.6 (July 2015)\nminimum number of studies = 80\nmask = bilateral ATL",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"from neurosynth.base.dataset import Dataset\ndataset = Dataset.load(\"data/neurosynth_0.6_400_4.pkl\")",
"_____no_output_____"
],
[
"from neurosynth.analysis.cluster import Clusterable",
"_____no_output_____"
],
[
"mask = 'masks/Xu_ATLp2.nii'",
"_____no_output_____"
],
[
"roi = Clusterable(dataset, mask=mask,min_studies=80,feature_threshold=0.05)",
"_____no_output_____"
],
[
"reference = Clusterable(dataset, min_studies=80,feature_threshold=0.05)",
"_____no_output_____"
],
[
"from copy import deepcopy\nimport numpy as np\nfrom six import string_types\nfrom sklearn import decomposition as sk_decomp\nfrom sklearn import cluster as sk_cluster\nfrom sklearn.metrics import pairwise_distances\nfrom os.path import exists, join\nfrom os import makedirs\nfrom nibabel import nifti1\nfrom neurosynth.analysis import meta\n\n\n\nreduce_reference = 'pca'\nn_components = 100\nreduce_reference = {\n 'pca': sk_decomp.RandomizedPCA,\n 'ica': sk_decomp.FastICA\n }[reduce_reference](n_components)",
"_____no_output_____"
],
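[
"# Note (sketch): sklearn.decomposition.RandomizedPCA was deprecated and\n# later removed; on newer scikit-learn the equivalent call would be\n# sk_decomp.PCA(n_components, svd_solver='randomized').",
"_____no_output_____"
],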
[
"method = 'coactivation'\ntranspose = (method == 'coactivation')",
"_____no_output_____"
],
[
"reference = reference.transform(reduce_reference, transpose=transpose)",
"_____no_output_____"
],
[
"distance_metric = 'correlation'\ndistances = pairwise_distances(roi.data, reference.data,\n metric=distance_metric)",
"_____no_output_____"
],
[
"from __future__ import print_function\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\n\nrange_n_clusters = [2,3,4,5,6,7,8,9,10,11,12,13,14,15]\n\nfor n_clusters in range_n_clusters:\n\n \n #clustering_algorithm = 'kmeans'\n #clustering_kwargs={}\n #clustering_algorithm = {\n # 'kmeans': sk_cluster.KMeans,\n # 'minik': sk_cluster.MiniBatchKMeans\n # }[clustering_algorithm](n_clusters, **clustering_kwargs)\n \n \n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n #labels = clustering_algorithm.fit_predict(distances) \n \n clusterer = KMeans(n_clusters=n_clusters, random_state=10)\n labels = clusterer.fit_predict(distances)\n \n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n silhouette_avg = silhouette_score(distances, labels)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)",
"For n_clusters = 2 The average silhouette_score is : 0.190294163513\nFor n_clusters = 3 The average silhouette_score is : 0.167182073533\nFor n_clusters = 4 The average silhouette_score is : 0.184467016516\nFor n_clusters = 5 The average silhouette_score is : 0.154431718383\nFor n_clusters = 6 The average silhouette_score is : 0.152940636074\nFor n_clusters = 7 The average silhouette_score is : 0.163771721997\nFor n_clusters = 8 The average silhouette_score is : 0.151980647777\nFor n_clusters = 9 The average silhouette_score is : 0.15673433723\nFor n_clusters = 10 The average silhouette_score is : 0.160857904424\nFor n_clusters = 11 The average silhouette_score is : 0.15334487601\nFor n_clusters = 12 The average silhouette_score is : 0.157220179237\nFor n_clusters = 13 The average silhouette_score is : 0.15521439245\nFor n_clusters = 14 The average silhouette_score is : 0.155730637855\nFor n_clusters = 15 The average silhouette_score is : 0.151272493316\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d4ce98bf2992845a2afe12df3464d57b80aca4 | 109,677 | ipynb | Jupyter Notebook | notebooks/dert/1.0-sa-dert-demo.ipynb | anujstam/kitchen | 31e167393ff054e0becc4a9c3ceca43a26bfbc8c | [
"MIT"
] | null | null | null | notebooks/dert/1.0-sa-dert-demo.ipynb | anujstam/kitchen | 31e167393ff054e0becc4a9c3ceca43a26bfbc8c | [
"MIT"
] | null | null | null | notebooks/dert/1.0-sa-dert-demo.ipynb | anujstam/kitchen | 31e167393ff054e0becc4a9c3ceca43a26bfbc8c | [
"MIT"
] | null | null | null | 106.379243 | 75,268 | 0.814273 | [
[
[
"### DeRT analysis using Iris dataset",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.preprocessing import StandardScaler\n\nfrom kitchen.dert.dert_models import FeatureDrivenModel, CombinedModel\nimport numpy as np\nfrom nltk.translate.bleu_score import sentence_bleu",
"Using TensorFlow backend.\n"
],
[
"iris = load_iris()\nX = iris['data']\ny = iris['target']\nscaler = StandardScaler()\nscaler.fit(X)\nX = scaler.transform(X)",
"_____no_output_____"
],
[
"trial_2 = CombinedModel()",
"_____no_output_____"
],
[
"trial_2.transform_data(X,y)",
"_____no_output_____"
],
[
"trial_2.create_model()",
"WARNING:tensorflow:From /home/shakkeel/anaconda3/envs/test_imly/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n"
],
[
"trial_2.fit_model()",
"WARNING:tensorflow:From /home/shakkeel/anaconda3/envs/test_imly/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n"
],
[
"trial_2.combined_model.summary()",
"__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nlabel_ip (InputLayer) (None, 5) 0 \n__________________________________________________________________________________________________\ndec_feat_ip (InputLayer) (None, 6, 10) 0 \n__________________________________________________________________________________________________\ngru_seq (GRU) (None, 5) 240 dec_feat_ip[0][0] \n label_ip[0][0] \n__________________________________________________________________________________________________\ncat (Concatenate) (None, 10) 0 label_ip[0][0] \n gru_seq[0][0] \n__________________________________________________________________________________________________\nop_sent (Dense) (None, 10) 110 cat[0][0] \n==================================================================================================\nTotal params: 350\nTrainable params: 350\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"
],
[
"x = np.array(trial_2.df.iloc[10, 0:4])",
"_____no_output_____"
],
[
"' '.join(trial_2.predict(x)[0])",
"_____no_output_____"
],
[
"trial_2.score()",
"actual vs predicted: ['S', '4L', '3R', '3L', '4L', 'E'] vs S 4L 3R 3L 4L E labels: 1 1\nactual vs predicted: ['S', '4R', 'E'] vs S 4R E labels: 2 2\nactual vs predicted: ['S', '4L', '3L', 'E'] vs S 4L 3L E labels: 0 0\nactual vs predicted: ['S', '4L', '3R', '3L', '4L', 'E'] vs S 4L 3R 3L 4L E labels: 1 1\nactual vs predicted: ['S', '4R', 'E'] vs S 4R E labels: 2 2\nactual vs predicted: ['S', '4R', 'E'] vs S 4R E labels: 2 2\nactual vs predicted: ['S', '4L', '3R', '3L', '4L', 'E'] vs S 4L 3R 3L 4L E labels: 1 1\nactual vs predicted: ['S', '4R', 'E'] vs S 4R E labels: 2 2\nactual vs predicted: ['S', '4L', '3R', '3L', '4L', 'E'] vs S 4L 3R 3L 4L E labels: 1 1\nactual vs predicted: ['S', '4R', 'E'] vs S E labels: 1 1\n -- Path mismatch -- \n"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### Trials",
"_____no_output_____"
]
],
[
[
"actual_path = trial_2.df.iloc[1, 8]\nactual_path_tok = [trial_2.char_indices[char] for char in actual_path]",
"_____no_output_____"
],
[
"actual_path_tok",
"_____no_output_____"
],
[
"trial_2.char_indices",
"_____no_output_____"
],
[
"trial_2.get_j_coeff(actual_path_tok, [0,9,6,1])",
"_____no_output_____"
],
[
"import distance\ndistance.levenshtein(['S', 'L', 'E'],['S', 'L', 'E'])",
"_____no_output_____"
],
[
"## Extracting failure paths\n\na = ['S', '4R', '4L', '3L', 'E'] # Actual\nb = ['S', '4R', '4L', '3L', 'E'] # Predicted\n# Target - 1, versicolor\nlist(set(a) - set(b)) == [] # order doesn't matter. Lost in BLEU score\n\n## Failure scenarios\n# Case 1 - Same path different order.\n# Sticking to the order would fail. Should we re-order and then use?\na = ['S', '4R', '4L', '3L', 'E']\nb = ['S', '4L', '3L','4R', 'E']\n\n# Case 2 - Different path, right prediction\n# Prediction is right but path is entirely different\na = ['S', '4R', '4L', '3L', 'E']\nb = ['S', '4R', 'E'] # Not a leaf node !\n\n# Ex actual path - ['S', '4R', '4L', '3L', 'E']\n# Perfect match \n# Order mismatch - ['S', '4L', '3L', '4R', 'E'] -- Check at pred level\n# Subset of the tree - ['S', '4L','4L','4L' 'E']",
"_____no_output_____"
],
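[
"# Sketch for \"Case 1\" above: an order-insensitive path comparison.\n# Counter treats each path as a multiset, so repeated moves are\n# counted correctly (unlike the set difference used earlier).\nfrom collections import Counter\nCounter(a) == Counter(b)",
"_____no_output_____"
],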
[
"a == b # order matters",
"_____no_output_____"
],
[
"a[-1]",
"_____no_output_____"
],
[
"test_path = list(''.join(a))[1:-1]",
"_____no_output_____"
],
[
"for i in range(len(test_path)):\n if i%2 == 0:\n test_path[i] = int(test_path[i])",
"_____no_output_____"
],
[
"test_path",
"_____no_output_____"
],
[
"trial_2.clf",
"_____no_output_____"
],
[
"from IPython.display import Image \nfrom sklearn import tree\nimport pydotplus\n\ndot_data = tree.export_graphviz(trial_2.clf, out_file=None, \n feature_names=iris.feature_names, \n class_names=iris.target_names)\n\ngraph = pydotplus.graph_from_dot_data(dot_data) \n\nImage(graph.create_png())\n\n",
"_____no_output_____"
],
[
"iris.feature_names",
"_____no_output_____"
],
[
"iris.target_names",
"_____no_output_____"
],
[
"trial_2.clf.tree_.feature",
"_____no_output_____"
],
[
"trial_2.clf.tree_.threshold",
"_____no_output_____"
],
[
"stack = [(0, -1)]\nstack.pop()",
"_____no_output_____"
],
[
"trial_2.clf.tree_.children_left # left nodes",
"_____no_output_____"
],
[
"trial_2.clf.tree_.children_right # right nodes",
"_____no_output_____"
],
[
"pred_path = ['L', 'L', 'R']\npred_features = [4, 3, 4]",
"_____no_output_____"
],
[
"n_nodes = trial_2.clf.tree_.node_count\nchildren_left = trial_2.clf.tree_.children_left\nchildren_right = trial_2.clf.tree_.children_right\nfeature = trial_2.clf.tree_.feature\n# threshold = trial_2.clf.tree_.threshold\n\n\n# The tree structure can be traversed to compute various properties such\n# as the depth of each node and whether or not it is a leaf.\n# node_depth = np.zeros(shape=n_nodes, dtype=np.int64)\nis_leaves = np.zeros(shape=n_nodes, dtype=bool)\nstack = [(0, -1)] # seed is the root node id and its parent depth\nwhile len(stack) > 0:\n node_id, parent_depth = stack.pop()\n node_depth[node_id] = parent_depth + 1\n\n # If we have a test node\n if (children_left[node_id] != children_right[node_id]):\n stack.append((children_left[node_id], parent_depth + 1))\n stack.append((children_right[node_id], parent_depth + 1))\n else:\n is_leaves[node_id] = True\n\n# print(\"The binary tree structure has %s nodes and has \"\n# \"the following tree structure:\"\n# % n_nodes)\n# for i in range(n_nodes):\n# if is_leaves[i]:\n# print(\"%snode=%s leaf node.\" % (node_depth[i] * \"\\t\", i))\n# else:\n# print(\"%snode=%s test node: go to node %s if X[:, %s] <= %s else to \"\n# \"node %s.\"\n# % (node_depth[i] * \"\\t\",\n# i,\n# children_left[i],\n# feature[i],\n# threshold[i],\n# children_right[i],\n# ))\n \n\nnode = 0\npred_target = -1\nfor i in range(len(pred_path)):\n if pred_path[i] == 'L':\n if feature[node]+1 == pred_features[i]:\n node = children_left[node]\n else:\n pred_target = -1 # Remove for \"subset\" checks\n break\n elif pred_path[i] == 'R':\n print(node)\n if feature[node]+1 == pred_features[i]:\n node = children_right[node]\n else:\n pred_target = -1 # Remove for \"subset\" checks\n break\n if is_leaves[node]:\n for i, x in enumerate(trial_2.clf.tree_.value[node][0]):\n if x > 0:\n pred_target = i",
"_____no_output_____"
],
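[
"# Sketch: the traversal above packaged as a reusable helper.\n# follow_path is a hypothetical name (not part of the notebook API);\n# it assumes 'L'/'R' moves and 1-indexed feature ids, as above, and\n# returns the majority class at the reached leaf (or -1 on mismatch).\ndef follow_path(clf, moves, feats):\n    tree = clf.tree_\n    node = 0\n    for move, feat in zip(moves, feats):\n        if tree.feature[node] + 1 != feat:\n            return -1  # predicted split feature does not match the tree\n        node = tree.children_left[node] if move == 'L' else tree.children_right[node]\n    if tree.children_left[node] == tree.children_right[node]:  # leaf node\n        return int(np.argmax(tree.value[node][0]))\n    return -1  # path ended on an internal node\n\nfollow_path(trial_2.clf, pred_path, pred_features)",
"_____no_output_____"
],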
[
"pred_target",
"_____no_output_____"
],
[
"feature",
"_____no_output_____"
],
[
"trial_2.clf.tree_.value[1][0]",
"_____no_output_____"
],
[
"children_left",
"_____no_output_____"
],
[
"feature",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d4d3d546ce4fb5e2d0fc2ce88235aad3ffe7c8 | 46,129 | ipynb | Jupyter Notebook | docs/examples/notebooks/01_notebook_introduction.ipynb | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | null | null | null | docs/examples/notebooks/01_notebook_introduction.ipynb | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | docs/examples/notebooks/01_notebook_introduction.ipynb | tkf/ipython | a90f362925c92c4cf6e541e4d31edecabefc88a7 | [
"BSD-3-Clause-Clear"
] | 1 | 2015-05-17T14:14:26.000Z | 2015-05-17T14:14:26.000Z | 110.093079 | 28,080 | 0.82434 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
d0d4de3de0c056cd293363e9e4c4f60338f09491 | 36,168 | ipynb | Jupyter Notebook | colab/MusicVAE.ipynb | hwangsog/magenta | 7f711751fcd0160891f3aa27b334d35252ffe796 | [
"Apache-2.0"
] | null | null | null | colab/MusicVAE.ipynb | hwangsog/magenta | 7f711751fcd0160891f3aa27b334d35252ffe796 | [
"Apache-2.0"
] | null | null | null | colab/MusicVAE.ipynb | hwangsog/magenta | 7f711751fcd0160891f3aa27b334d35252ffe796 | [
"Apache-2.0"
] | null | null | null | 38.517572 | 621 | 0.571389 | [
[
[
"<a href=\"https://colab.research.google.com/github/hwangsog/magenta/blob/master/MusicVAE.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Copyright 2017 Google LLC.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.",
"_____no_output_____"
],
[
"# MusicVAE: A Hierarchical Latent Vector Model for Learning Long-Term Structure in Music.\n### ___Adam Roberts, Jesse Engel, Colin Raffel, Curtis Hawthorne, and Douglas Eck___\n\n[MusicVAE](https://g.co/magenta/music-vae) learns a latent space of musical scores, providing different modes\nof interactive musical creation, including:\n\n* Random sampling from the prior distribution.\n* Interpolation between existing sequences.\n* Manipulation of existing sequences via attribute vectors.\n\nExamples of these interactions can be generated below, and selections can be heard in our\n[YouTube playlist](https://www.youtube.com/playlist?list=PLBUMAYA6kvGU8Cgqh709o5SUvo-zHGTxr).\n\nFor short sequences (e.g., 2-bar \"loops\"), we use a bidirectional LSTM encoder\nand LSTM decoder. For longer sequences, we use a novel hierarchical LSTM\ndecoder, which helps the model learn longer-term structures.\n\nWe also model the interdependencies between instruments by training multiple\ndecoders on the lowest-level embeddings of the hierarchical decoder.\n\nFor additional details, check out our [blog post](https://g.co/magenta/music-vae) and [paper](https://goo.gl/magenta/musicvae-paper).\n___\n\nThis colab notebook is self-contained and should run natively on google cloud. The [code](https://github.com/tensorflow/magenta/tree/master/magenta/models/music_vae) and [checkpoints](http://download.magenta.tensorflow.org/models/music_vae/checkpoints.tar.gz) can be downloaded separately and run locally, which is required if you want to train your own model.",
"_____no_output_____"
],
[
"# Basic Instructions\n\n1. Double click on the hidden cells to make them visible, or select \"View > Expand Sections\" in the menu at the top.\n2. Hover over the \"`[ ]`\" in the top-left corner of each cell and click on the \"Play\" button to run it, in order.\n3. Listen to the generated samples.\n4. Make it your own: copy the notebook, modify the code, train your own models, upload your own MIDI, etc.!",
"_____no_output_____"
],
[
"# Environment Setup\nIncludes package installation for sequence synthesis. Will take a few minutes.\n",
"_____no_output_____"
]
],
[
[
"#@title Setup Environment\n#@test {\"output\": \"ignore\"}\n\nimport glob\n\nprint 'Copying checkpoints and example MIDI from GCS. This will take a few minutes...'\n!gsutil -q -m cp -R gs://download.magenta.tensorflow.org/models/music_vae/colab2/* /content/\n\nprint 'Installing dependencies...'\n!apt-get update -qq && apt-get install -qq libfluidsynth1 fluid-soundfont-gm build-essential libasound2-dev libjack-dev\n!pip install -q pyfluidsynth\n!pip install -qU magenta\n\n# Hack to allow python to pick up the newly-installed fluidsynth lib.\n# This is only needed for the hosted Colab environment.\nimport ctypes.util\norig_ctypes_util_find_library = ctypes.util.find_library\ndef proxy_find_library(lib):\n if lib == 'fluidsynth':\n return 'libfluidsynth.so.1'\n else:\n return orig_ctypes_util_find_library(lib)\nctypes.util.find_library = proxy_find_library\n\n\nprint 'Importing libraries and defining some helper functions...'\nfrom google.colab import files\nimport magenta.music as mm\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae.trained_model import TrainedModel\nimport numpy as np\nimport os\nimport tensorflow as tf\n\n# Necessary until pyfluidsynth is updated (>1.2.5).\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\ndef play(note_sequence):\n mm.play_sequence(note_sequence, synth=mm.fluidsynth)\n\ndef interpolate(model, start_seq, end_seq, num_steps, max_length=32,\n assert_same_length=True, temperature=0.5,\n individual_duration=4.0):\n \"\"\"Interpolates between a start and end sequence.\"\"\"\n note_sequences = model.interpolate(\n start_seq, end_seq,num_steps=num_steps, length=max_length,\n temperature=temperature,\n assert_same_length=assert_same_length)\n\n print 'Start Seq Reconstruction'\n play(note_sequences[0])\n print 'End Seq Reconstruction'\n play(note_sequences[-1])\n print 'Mean Sequence'\n play(note_sequences[num_steps // 2])\n print 'Start -> End Interpolation'\n interp_seq = mm.sequences_lib.concatenate_sequences(\n note_sequences, [individual_duration] * len(note_sequences))\n play(interp_seq)\n mm.plot_sequence(interp_seq)\n return interp_seq if num_steps > 3 else note_sequences[num_steps // 2]\n\ndef download(note_sequence, filename):\n mm.sequence_proto_to_midi_file(note_sequence, filename)\n files.download(filename)\n\nprint 'Done'",
"_____no_output_____"
]
],
[
[
"# 2-Bar Drums Model\n\nBelow are 4 pre-trained models to experiment with. The first 3 map the 61 MIDI drum \"pitches\" to a reduced set of 9 classes (bass, snare, closed hi-hat, open hi-hat, low tom, mid tom, high tom, crash cymbal, ride cymbal) for a simplified but less expressive output space. The last model uses a [NADE](http://homepages.inf.ed.ac.uk/imurray2/pub/11nade/) to represent all possible MIDI drum \"pitches\".\n\n* **drums_2bar_oh_lokl**: This *low* KL model was trained for more *realistic* sampling. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 0 free bits, and had a fixed beta value of 0.8. After 300k steps, the final accuracy is 0.73 and KL divergence is 11 bits.\n* **drums_2bar_oh_hikl**: This *high* KL model was trained for *better reconstruction and interpolation*. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 96 free bits and had a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k, steps the final accuracy is 0.97 and KL divergence is 107 bits.\n* **drums_2bar_nade_reduced**: This model outputs a multi-label \"pianoroll\" with 9 classes. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and 9-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 96 free bits and has a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.98 and KL divergence is 110 bits.\n* **drums_2bar_nade_full**: The output is a multi-label \"pianoroll\" with 61 classes. A single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and 61-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 0 free bits and has a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.90 and KL divergence is 116 bits.",
"_____no_output_____"
]
],
[
[
"#@title Load Pretrained Models\n\ndrums_models = {}\n# One-hot encoded.\ndrums_config = configs.CONFIG_MAP['cat-drums_2bar_small']\ndrums_models['drums_2bar_oh_lokl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.lokl.ckpt')\ndrums_models['drums_2bar_oh_hikl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.hikl.ckpt')\n\n# Multi-label NADE.\ndrums_nade_reduced_config = configs.CONFIG_MAP['nade-drums_2bar_reduced']\ndrums_models['drums_2bar_nade_reduced'] = TrainedModel(drums_nade_reduced_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.reduced.ckpt')\ndrums_nade_full_config = configs.CONFIG_MAP['nade-drums_2bar_full']\ndrums_models['drums_2bar_nade_full'] = TrainedModel(drums_nade_full_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.full.ckpt')\n",
"_____no_output_____"
]
],
[
[
"## Generate Samples",
"_____no_output_____"
]
],
[
[
"#@title Generate 4 samples from the prior of one of the models listed above.\ndrums_sample_model = \"drums_2bar_oh_lokl\" #@param [\"drums_2bar_oh_lokl\", \"drums_2bar_oh_hikl\", \"drums_2bar_nade_reduced\", \"drums_2bar_nade_full\"]\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\ndrums_samples = drums_models[drums_sample_model].sample(n=4, length=32, temperature=temperature)\nfor ns in drums_samples:\n play(ns)",
"_____no_output_____"
],
[
"#@title Optionally download generated MIDI samples.\nfor i, ns in enumerate(drums_samples):\n download(ns, '%s_sample_%d.mid' % (drums_sample_model, i))",
"_____no_output_____"
]
],
[
[
"## Generate Interpolations",
"_____no_output_____"
]
],
[
[
"#@title Option 1: Use example MIDI files for interpolation endpoints.\ninput_drums_midi_data = [\n tf.gfile.Open(fn).read()\n for fn in sorted(tf.gfile.Glob('/content/midi/drums_2bar*.mid'))]",
"_____no_output_____"
],
[
"#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.\ninput_drums_midi_data = files.upload().values() or input_drums_midi_data",
"_____no_output_____"
],
[
"#@title Extract drums from MIDI files. This will extract all unique 2-bar drum beats using a sliding window with a stride of 1 bar.\ndrums_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_drums_midi_data]\nextracted_beats = []\nfor ns in drums_input_seqs:\n extracted_beats.extend(drums_nade_full_config.data_converter.to_notesequences(\n drums_nade_full_config.data_converter.to_tensors(ns)[1]))\nfor i, ns in enumerate(extracted_beats):\n print \"Beat\", i\n play(ns)",
"_____no_output_____"
],
[
"#@title Interpolate between 2 beats, selected from those in the previous cell.\ndrums_interp_model = \"drums_2bar_oh_hikl\" #@param [\"drums_2bar_oh_lokl\", \"drums_2bar_oh_hikl\", \"drums_2bar_nade_reduced\", \"drums_2bar_nade_full\"]\nstart_beat = 0 #@param {type:\"integer\"}\nend_beat = 1 #@param {type:\"integer\"}\nstart_beat = extracted_beats[start_beat]\nend_beat = extracted_beats[end_beat]\n\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\nnum_steps = 13 #@param {type:\"integer\"}\n\ndrums_interp = interpolate(drums_models[drums_interp_model], start_beat, end_beat, num_steps=num_steps, temperature=temperature)",
"_____no_output_____"
],
[
"#@title Optionally download interpolation MIDI file.\ndownload(drums_interp, '%s_interp.mid' % drums_interp_model)",
"_____no_output_____"
]
],
[
[
"# 2-Bar Melody Model\n\nThe pre-trained model consists of a single-layer bidirectional LSTM encoder with 2048 nodes in each direction, a 3-layer LSTM decoder with 2048 nodes in each layer, and Z with 512 dimensions. The model was given 0 free bits, and had its beta valued annealed at an exponential rate of 0.99999 from 0 to 0.43 over 200k steps. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. The final accuracy is 0.95 and KL divergence is 58 bits.",
"_____no_output_____"
]
],
[
[
"#@title Load the pre-trained model.\nmel_2bar_config = configs.CONFIG_MAP['cat-mel_2bar_big']\nmel_2bar = TrainedModel(mel_2bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_2bar_big.ckpt')",
"_____no_output_____"
]
],
[
[
"## Generate Samples",
"_____no_output_____"
]
],
[
[
"#@title Generate 4 samples from the prior.\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\nmel_2_samples = mel_2bar.sample(n=4, length=32, temperature=temperature)\nfor ns in mel_2_samples:\n play(ns)",
"_____no_output_____"
],
[
"#@title Optionally download samples.\nfor i, ns in enumerate(mel_2_samples):\n download(ns, 'mel_2bar_sample_%d.mid' % i)",
"_____no_output_____"
]
],
[
[
"## Generate Interpolations",
"_____no_output_____"
]
],
[
[
"#@title Option 1: Use example MIDI files for interpolation endpoints.\ninput_mel_midi_data = [\n tf.gfile.Open(fn).read()\n for fn in sorted(tf.gfile.Glob('/content/midi/mel_2bar*.mid'))]",
"_____no_output_____"
],
[
"#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.\ninput_mel_midi_data = files.upload().values() or input_mel_midi_data",
"_____no_output_____"
],
[
"#@title Extract melodies from MIDI files. This will extract all unique 2-bar melodies using a sliding window with a stride of 1 bar.\nmel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_midi_data]\nextracted_mels = []\nfor ns in mel_input_seqs:\n extracted_mels.extend(\n mel_2bar_config.data_converter.to_notesequences(\n mel_2bar_config.data_converter.to_tensors(ns)[1]))\nfor i, ns in enumerate(extracted_mels):\n print \"Melody\", i\n play(ns)",
"_____no_output_____"
],
[
"#@title Interpolate between 2 melodies, selected from those in the previous cell.\nstart_melody = 0 #@param {type:\"integer\"}\nend_melody = 1 #@param {type:\"integer\"}\nstart_mel = extracted_mels[start_melody]\nend_mel = extracted_mels[end_melody]\n\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\nnum_steps = 13 #@param {type:\"integer\"}\n\nmel_2bar_interp = interpolate(mel_2bar, start_mel, end_mel, num_steps=num_steps, temperature=temperature)",
"_____no_output_____"
],
[
"#@title Optionally download interpolation MIDI file.\ndownload(mel_2bar_interp, 'mel_2bar_interp.mid')",
"_____no_output_____"
]
],
[
[
"# 16-bar Melody Models\n\nThe pre-trained hierarchical model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 16-step 2-layer LSTM \"conductor\" decoder with 1024 nodes in each layer, a 2-layer LSTM core decoder with 1024 nodes in each layer, and a Z with 512 dimensions. It was given 256 free bits, and had a fixed beta value of 0.2. After 25k steps, the final accuracy is 0.90 and KL divergence is 277 bits.",
"_____no_output_____"
]
],
[
[
"#@title Load the pre-trained models.\nmel_16bar_models = {}\nhierdec_mel_16bar_config = configs.CONFIG_MAP['hierdec-mel_16bar']\nmel_16bar_models['hierdec_mel_16bar'] = TrainedModel(hierdec_mel_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_16bar_hierdec.ckpt')\n\nflat_mel_16bar_config = configs.CONFIG_MAP['flat-mel_16bar']\nmel_16bar_models['baseline_flat_mel_16bar'] = TrainedModel(flat_mel_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_16bar_flat.ckpt')",
"_____no_output_____"
]
],
[
[
"## Generate Samples",
"_____no_output_____"
]
],
[
[
"#@title Generate 4 samples from the selected model prior.\nmel_sample_model = \"hierdec_mel_16bar\" #@param [\"hierdec_mel_16bar\", \"baseline_flat_mel_16bar\"]\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\nmel_16_samples = mel_16bar_models[mel_sample_model].sample(n=4, length=256, temperature=temperature)\nfor ns in mel_16_samples:\n play(ns)",
"_____no_output_____"
],
[
"#@title Optionally download MIDI samples.\nfor i, ns in enumerate(mel_16_samples):\n download(ns, '%s_sample_%d.mid' % (mel_sample_model, i))",
"_____no_output_____"
]
],
[
[
"## Generate Means",
"_____no_output_____"
]
],
[
[
"#@title Option 1: Use example MIDI files for interpolation endpoints.\ninput_mel_16_midi_data = [\n tf.gfile.Open(fn).read()\n for fn in sorted(tf.gfile.Glob('/content/midi/mel_16bar*.mid'))]",
"_____no_output_____"
],
[
"#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.\ninput_mel_16_midi_data = files.upload().values() or input_mel_16_midi_data",
"_____no_output_____"
],
[
"#@title Extract melodies from MIDI files. This will extract all unique 16-bar melodies using a sliding window with a stride of 1 bar.\nmel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_16_midi_data]\nextracted_16_mels = []\nfor ns in mel_input_seqs:\n extracted_16_mels.extend(\n hierdec_mel_16bar_config.data_converter.to_notesequences(\n hierdec_mel_16bar_config.data_converter.to_tensors(ns)[1]))\nfor i, ns in enumerate(extracted_16_mels):\n print \"Melody\", i\n play(ns)",
"_____no_output_____"
],
[
"#@title Compute the reconstructions and mean of the two melodies, selected from the previous cell.\nmel_interp_model = \"hierdec_mel_16bar\" #@param [\"hierdec_mel_16bar\", \"baseline_flat_mel_16bar\"]\n\nstart_melody = 0 #@param {type:\"integer\"}\nend_melody = 1 #@param {type:\"integer\"}\nstart_mel = extracted_16_mels[start_melody]\nend_mel = extracted_16_mels[end_melody]\n\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\n\nmel_16bar_mean = interpolate(mel_16bar_models[mel_interp_model], start_mel, end_mel, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)",
"_____no_output_____"
],
[
"#@title Optionally download mean MIDI file.\ndownload(mel_16bar_mean, '%s_mean.mid' % mel_interp_model)",
"_____no_output_____"
]
],
[
[
"#16-bar \"Trio\" Models (lead, bass, drums)\n\nWe present two pre-trained models for 16-bar trios: a hierarchical model and a flat (baseline) model.\n\nThe pre-trained hierarchical model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 16-step 2-layer LSTM \"conductor\" decoder with 1024 nodes in each layer, 3 (lead, bass, drums) 2-layer LSTM core decoders with 1024 nodes in each layer, and a Z with 512 dimensions. It was given 1024 free bits, and had a fixed beta value of 0.1. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 50k steps, the final accuracy is 0.82 for lead, 0.87 for bass, and 0.90 for drums, and the KL divergence is 1027 bits.\n\nThe pre-trained flat model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 3-layer LSTM decoder with 2048 nodes in each layer, and a Z with 512 dimensions. It was given 1024 free bits, and had a fixed beta value of 0.1. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 50k steps, the final accuracy is 0.67 for lead, 0.66 for bass, and 0.79 for drums, and the KL divergence is 1016 bits.",
"_____no_output_____"
]
],
[
[
"#@title Load the pre-trained models.\ntrio_models = {}\nhierdec_trio_16bar_config = configs.CONFIG_MAP['hierdec-trio_16bar']\ntrio_models['hierdec_trio_16bar'] = TrainedModel(hierdec_trio_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/trio_16bar_hierdec.ckpt')\n\nflat_trio_16bar_config = configs.CONFIG_MAP['flat-trio_16bar']\ntrio_models['baseline_flat_trio_16bar'] = TrainedModel(flat_trio_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/trio_16bar_flat.ckpt')",
"_____no_output_____"
]
],
[
[
"## Generate Samples",
"_____no_output_____"
]
],
[
[
"#@title Generate 4 samples from the selected model prior.\ntrio_sample_model = \"hierdec_trio_16bar\" #@param [\"hierdec_trio_16bar\", \"baseline_flat_trio_16bar\"]\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\n\ntrio_16_samples = trio_models[trio_sample_model].sample(n=4, length=256, temperature=temperature)\nfor ns in trio_16_samples:\n play(ns)",
"_____no_output_____"
],
[
"#@title Optionally download MIDI samples.\nfor i, ns in enumerate(trio_16_samples):\n download(ns, '%s_sample_%d.mid' % (trio_sample_model, i))",
"_____no_output_____"
]
],
[
[
"## Generate Means",
"_____no_output_____"
]
],
[
[
"#@title Option 1: Use example MIDI files for interpolation endpoints.\ninput_trio_midi_data = [\n tf.gfile.Open(fn).read()\n for fn in sorted(tf.gfile.Glob('/content/midi/trio_16bar*.mid'))]",
"_____no_output_____"
],
[
"#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.\ninput_trio_midi_data = files.upload().values() or input_trio_midi_data",
"_____no_output_____"
],
[
"#@title Extract trios from MIDI files. This will extract all unique 16-bar trios using a sliding window with a stride of 1 bar.\ntrio_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_trio_midi_data]\nextracted_trios = []\nfor ns in trio_input_seqs:\n extracted_trios.extend(\n hierdec_trio_16bar_config.data_converter.to_notesequences(\n hierdec_trio_16bar_config.data_converter.to_tensors(ns)[1]))\nfor i, ns in enumerate(extracted_trios):\n print \"Trio\", i\n play(ns)",
"_____no_output_____"
],
[
"#@title Compute the reconstructions and mean of the two trios, selected from the previous cell.\ntrio_interp_model = \"hierdec_trio_16bar\" #@param [\"hierdec_trio_16bar\", \"baseline_flat_trio_16bar\"]\n\nstart_trio = 0 #@param {type:\"integer\"}\nend_trio = 1 #@param {type:\"integer\"}\nstart_trio = extracted_trios[start_trio]\nend_trio = extracted_trios[end_trio]\n\ntemperature = 0.5 #@param {type:\"slider\", min:0.1, max:1.5, step:0.1}\ntrio_16bar_mean = interpolate(trio_models[trio_interp_model], start_trio, end_trio, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)",
"_____no_output_____"
],
[
"#@title Optionally download mean MIDI file.\ndownload(trio_16bar_mean, '%s_mean.mid' % trio_interp_model)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0d4de4dff993526a094f3f44f90ca0dbbffe961 | 548,046 | ipynb | Jupyter Notebook | project-03/project_3_starter.ipynb | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | 1 | 2020-05-15T09:41:14.000Z | 2020-05-15T09:41:14.000Z | project-03/project_3_starter.ipynb | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | null | null | null | project-03/project_3_starter.ipynb | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | null | null | null | 57.495384 | 70,335 | 0.57634 | [
[
[
"# Project 3: Smart Beta Portfolio and Portfolio Optimization\n\n## Overview\n\n\nSmart beta has a broad meaning, but we can say in practice that when we use the universe of stocks from an index, and then apply some weighting scheme other than market cap weighting, it can be considered a type of smart beta fund. A Smart Beta portfolio generally gives investors exposure or \"beta\" to one or more types of market characteristics (or factors) that are believed to predict prices while giving investors a diversified broad exposure to a particular market. Smart Beta portfolios generally target momentum, earnings quality, low volatility, and dividends or some combination. Smart Beta Portfolios are generally rebalanced infrequently and follow relatively simple rules or algorithms that are passively managed. Model changes to these types of funds are also rare requiring prospectus filings with US Security and Exchange Commission in the case of US focused mutual funds or ETFs.. Smart Beta portfolios are generally long-only, they do not short stocks.\n\nIn contrast, a purely alpha-focused quantitative fund may use multiple models or algorithms to create a portfolio. The portfolio manager retains discretion in upgrading or changing the types of models and how often to rebalance the portfolio in attempt to maximize performance in comparison to a stock benchmark. Managers may have discretion to short stocks in portfolios.\n\nImagine you're a portfolio manager, and wish to try out some different portfolio weighting methods.\n\nOne way to design portfolio is to look at certain accounting measures (fundamentals) that, based on past trends, indicate stocks that produce better results. \n\n\nFor instance, you may start with a hypothesis that dividend-issuing stocks tend to perform better than stocks that do not. This may not always be true of all companies; for instance, Apple does not issue dividends, but has had good historical performance. The hypothesis about dividend-paying stocks may go something like this: \n\nCompanies that regularly issue dividends may also be more prudent in allocating their available cash, and may indicate that they are more conscious of prioritizing shareholder interests. For example, a CEO may decide to reinvest cash into pet projects that produce low returns. Or, the CEO may do some analysis, identify that reinvesting within the company produces lower returns compared to a diversified portfolio, and so decide that shareholders would be better served if they were given the cash (in the form of dividends). So according to this hypothesis, dividends may be both a proxy for how the company is doing (in terms of earnings and cash flow), but also a signal that the company acts in the best interest of its shareholders. Of course, it's important to test whether this works in practice.\n\n\nYou may also have another hypothesis, with which you wish to design a portfolio that can then be made into an ETF. You may find that investors may wish to invest in passive beta funds, but wish to have less risk exposure (less volatility) in their investments. The goal of having a low volatility fund that still produces returns similar to an index may be appealing to investors who have a shorter investment time horizon, and so are more risk averse.\n\nSo the objective of your proposed portfolio is to design a portfolio that closely tracks an index, while also minimizing the portfolio variance. 
Also, if this portfolio can match the returns of the index with less volatility, then it has a higher risk-adjusted return (same return, lower volatility).\n\nSmart Beta ETFs can be designed with both of these two general methods (among others): alternative weighting and minimum volatility ETF.\n\n\n## Instructions\nEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.\n\n## Packages\nWhen you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.\n\nThe other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.\n### Install Packages",
"_____no_output_____"
]
],
[
[
"import sys\n!{sys.executable} -m pip install -r requirements.txt",
"Requirement already satisfied: colour==0.1.5 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 1))\nCollecting cvxpy==1.0.3 (from -r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/a1/59/2613468ffbbe3a818934d06b81b9f4877fe054afbf4f99d2f43f398a0b34/cvxpy-1.0.3.tar.gz (880kB)\n\u001b[K 100% |████████████████████████████████| 880kB 520kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: cycler==0.10.0 in /opt/conda/lib/python3.6/site-packages/cycler-0.10.0-py3.6.egg (from -r requirements.txt (line 3))\nCollecting numpy==1.14.5 (from -r requirements.txt (line 4))\n Downloading https://files.pythonhosted.org/packages/68/1e/116ad560de97694e2d0c1843a7a0075cc9f49e922454d32f49a80eb6f1f2/numpy-1.14.5-cp36-cp36m-manylinux1_x86_64.whl (12.2MB)\n\u001b[K 100% |████████████████████████████████| 12.2MB 34kB/s eta 0:00:01 25% |████████▎ | 3.1MB 31.7MB/s eta 0:00:01 37% |████████████ | 4.6MB 27.5MB/s eta 0:00:01\n\u001b[?25hCollecting pandas==0.21.1 (from -r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/3a/e1/6c514df670b887c77838ab856f57783c07e8760f2e3d5939203a39735e0e/pandas-0.21.1-cp36-cp36m-manylinux1_x86_64.whl (26.2MB)\n\u001b[K 100% |████████████████████████████████| 26.2MB 17kB/s eta 0:00:01 24% |████████ | 6.5MB 32.9MB/s eta 0:00:01 42% |█████████████▌ | 11.1MB 30.8MB/s eta 0:00:01 47% |███████████████▎ | 12.6MB 28.4MB/s eta 0:00:01 53% |█████████████████▏ | 14.1MB 31.8MB/s eta 0:00:01 92% |█████████████████████████████▌ | 24.2MB 29.7MB/s eta 0:00:01 97% |███████████████████████████████▎| 25.7MB 30.8MB/s eta 0:00:01\n\u001b[?25hCollecting plotly==2.2.3 (from -r requirements.txt (line 6))\n Downloading https://files.pythonhosted.org/packages/99/a6/8214b6564bf4ace9bec8a26e7f89832792be582c042c47c912d3201328a0/plotly-2.2.3.tar.gz (1.1MB)\n\u001b[K 100% |████████████████████████████████| 1.1MB 412kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: pyparsing==2.2.0 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 7))\nRequirement already satisfied: python-dateutil==2.6.1 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 8))\nRequirement already satisfied: pytz==2017.3 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 9))\nRequirement already satisfied: requests==2.18.4 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 10))\nCollecting scipy==1.0.0 (from -r requirements.txt (line 11))\n Downloading https://files.pythonhosted.org/packages/d8/5e/caa01ba7be11600b6a9d39265440d7b3be3d69206da887c42bef049521f2/scipy-1.0.0-cp36-cp36m-manylinux1_x86_64.whl (50.0MB)\n\u001b[K 100% |████████████████████████████████| 50.0MB 9.1kB/s eta 0:00:01 12% |████ | 6.3MB 28.2MB/s eta 0:00:02 28% |█████████ | 14.2MB 27.5MB/s eta 0:00:02 33% |██████████▋ | 16.5MB 28.1MB/s eta 0:00:02 35% |███████████▍ | 17.9MB 27.5MB/s eta 0:00:02 37% |████████████ | 18.9MB 23.2MB/s eta 0:00:02 39% |████████████▊ | 20.0MB 22.7MB/s eta 0:00:02 47% |███████████████ | 23.6MB 24.0MB/s eta 0:00:02 49% |████████████████ | 24.9MB 26.9MB/s eta 0:00:01 52% |████████████████▊ | 26.1MB 28.2MB/s eta 0:00:01 56% |██████████████████▏ | 28.4MB 24.4MB/s eta 0:00:01 59% |███████████████████ | 29.6MB 26.1MB/s eta 0:00:01 64% |████████████████████▌ | 32.1MB 28.3MB/s eta 0:00:01 68% |██████████████████████ | 34.4MB 25.0MB/s eta 0:00:01 69% |██████████████████████▏ | 34.7MB 2.4MB/s eta 0:00:07 74% |███████████████████████▊ | 37.1MB 30.9MB/s eta 0:00:01 76% 
|████████████████████████▋ | 38.4MB 25.9MB/s eta 0:00:01 81% |██████████████████████████ | 40.7MB 26.6MB/s eta 0:00:01 83% |██████████████████████████▉ | 42.0MB 25.8MB/s eta 0:00:01 86% |███████████████████████████▊ | 43.3MB 25.5MB/s eta 0:00:01 91% |█████████████████████████████▏ | 45.7MB 26.2MB/s eta 0:00:01 93% |██████████████████████████████ | 47.0MB 30.9MB/s eta 0:00:01 96% |██████████████████████████████▉ | 48.2MB 25.3MB/s eta 0:00:01 98% |███████████████████████████████▋| 49.4MB 29.0MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: scikit-learn==0.19.1 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 12))\nRequirement already satisfied: six==1.11.0 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 13))\nCollecting tqdm==4.19.5 (from -r requirements.txt (line 14))\n Downloading https://files.pythonhosted.org/packages/71/3c/341b4fa23cb3abc335207dba057c790f3bb329f6757e1fcd5d347bcf8308/tqdm-4.19.5-py2.py3-none-any.whl (51kB)\n\u001b[K 100% |████████████████████████████████| 61kB 3.6MB/s eta 0:00:01\n\u001b[?25hCollecting osqp (from cvxpy==1.0.3->-r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/c0/01/8becb29b0d38e0c40eab9e3d54aa8138fa62a010d519caf65e9210021bd3/osqp-0.5.0-cp36-cp36m-manylinux1_x86_64.whl (147kB)\n\u001b[K 100% |████████████████████████████████| 153kB 2.4MB/s ta 0:00:011\n\u001b[?25hCollecting ecos>=2 (from cvxpy==1.0.3->-r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/55/ed/d131ff51f3a8f73420eb1191345eb49f269f23cadef515172e356018cde3/ecos-2.0.7.post1-cp36-cp36m-manylinux1_x86_64.whl (147kB)\n\u001b[K 100% |████████████████████████████████| 153kB 2.6MB/s eta 0:00:01\n\u001b[?25hCollecting scs>=1.1.3 (from cvxpy==1.0.3->-r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/b3/fd/6e01c4f4a69fcc6c3db130ba55572089e78e77ea8c0921a679f9da1ec04c/scs-2.0.2.tar.gz (133kB)\n\u001b[K 100% |████████████████████████████████| 143kB 2.8MB/s eta 0:00:01\n\u001b[?25hCollecting multiprocess (from cvxpy==1.0.3->-r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/31/60/6d74caa02b54ca43092e745640c7d98f367f07160441682a01602ce00bc5/multiprocess-0.70.7.tar.gz (1.4MB)\n\u001b[K 100% |████████████████████████████████| 1.4MB 332kB/s eta 0:00:01 19% |██████▏ | 266kB 23.9MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: fastcache in /opt/conda/lib/python3.6/site-packages (from cvxpy==1.0.3->-r requirements.txt (line 2))\nRequirement already satisfied: toolz in /opt/conda/lib/python3.6/site-packages (from cvxpy==1.0.3->-r requirements.txt (line 2))\nRequirement already satisfied: decorator>=4.0.6 in /opt/conda/lib/python3.6/site-packages (from plotly==2.2.3->-r requirements.txt (line 6))\nRequirement already satisfied: nbformat>=4.2 in /opt/conda/lib/python3.6/site-packages (from plotly==2.2.3->-r requirements.txt (line 6))\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10))\nRequirement already satisfied: idna<2.7,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10))\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 10))\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 
10))\nRequirement already satisfied: future in /opt/conda/lib/python3.6/site-packages (from osqp->cvxpy==1.0.3->-r requirements.txt (line 2))\nCollecting dill>=0.2.9 (from multiprocess->cvxpy==1.0.3->-r requirements.txt (line 2))\n Downloading https://files.pythonhosted.org/packages/fe/42/bfe2e0857bc284cbe6a011d93f2a9ad58a22cb894461b199ae72cfef0f29/dill-0.2.9.tar.gz (150kB)\n\u001b[K 100% |████████████████████████████████| 153kB 2.4MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: ipython-genutils in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6))\nRequirement already satisfied: traitlets>=4.1 in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6))\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6))\nRequirement already satisfied: jupyter-core in /opt/conda/lib/python3.6/site-packages (from nbformat>=4.2->plotly==2.2.3->-r requirements.txt (line 6))\nBuilding wheels for collected packages: cvxpy, plotly, scs, multiprocess, dill\n Running setup.py bdist_wheel for cvxpy ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/2b/60/0b/0c2596528665e21d698d6f84a3406c52044c7b4ca6ac737cf3\n Running setup.py bdist_wheel for plotly ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/98/54/81/dd92d5b0858fac680cd7bdb8800eb26c001dd9f5dc8b1bc0ba\n Running setup.py bdist_wheel for scs ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/ff/f0/aa/530ccd478d7d9900b4e9ef5bc5a39e895ce110bed3d3ac653e\n Running setup.py bdist_wheel for multiprocess ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/3a/ed/51/77c833462c3e757ce50c4b2b68bdf53f5d1745542fe567d740\n Running setup.py bdist_wheel for dill ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/5b/d7/0f/e58eae695403de585269f4e4a94e0cd6ca60ec0c202936fa4a\nSuccessfully built cvxpy plotly scs multiprocess dill\nInstalling collected packages: numpy, scipy, osqp, ecos, scs, dill, multiprocess, cvxpy, pandas, plotly, tqdm\n Found existing installation: numpy 1.12.1\n Uninstalling numpy-1.12.1:\n Successfully uninstalled numpy-1.12.1\n Found existing installation: scipy 0.19.1\n Uninstalling scipy-0.19.1:\n Successfully uninstalled scipy-0.19.1\n Found existing installation: dill 0.2.7.1\n Uninstalling dill-0.2.7.1:\n Successfully uninstalled dill-0.2.7.1\n Found existing installation: pandas 0.20.3\n Uninstalling pandas-0.20.3:\n Successfully uninstalled pandas-0.20.3\n Found existing installation: plotly 2.0.15\n Uninstalling plotly-2.0.15:\n Successfully uninstalled plotly-2.0.15\n Found existing installation: tqdm 4.11.2\n Uninstalling tqdm-4.11.2:\n Successfully uninstalled tqdm-4.11.2\nSuccessfully installed cvxpy-1.0.3 dill-0.2.9 ecos-2.0.7.post1 multiprocess-0.70.7 numpy-1.14.5 osqp-0.5.0 pandas-0.21.1 plotly-2.2.3 scipy-1.0.0 scs-2.0.2 tqdm-4.19.5\n\u001b[33mYou are using pip version 9.0.1, however version 19.0.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
]
],
[
[
"### Load Packages",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport helper\nimport project_helper\nimport project_tests",
"_____no_output_____"
]
],
[
[
"## Market Data\n### Load Data\nFor this universe of stocks, we'll be selecting large dollar volume stocks. We're using this universe, since it is highly liquid.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../../data/project_3/eod-quotemedia.csv')\n\npercent_top_dollar = 0.2\nhigh_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)\ndf = df[df['ticker'].isin(high_volume_symbols)]\n\nclose = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')\nvolume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume')\ndividends = df.reset_index().pivot(index='date', columns='ticker', values='dividends')",
"_____no_output_____"
]
],
[
[
"### View Data\nTo see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.",
"_____no_output_____"
]
],
[
[
"project_helper.print_dataframe(close)",
"_____no_output_____"
]
],
[
[
"# Part 1: Smart Beta Portfolio\nIn Part 1 of this project, you'll build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. You'll compare this portfolio to a market cap weighted index to see how well it performs. \n\nNote that in practice, you'll probably get the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's), but for this exercise we will simulate a market cap weighted index.\n\n## Index Weights\nThe index we'll be using is based on large dollar volume stocks. Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is close prices and volume data:\n```\n Prices\n A B ...\n2013-07-08 2 2 ...\n2013-07-09 5 6 ...\n2013-07-10 1 2 ...\n2013-07-11 6 5 ...\n... ... ... ...\n\n Volume\n A B ...\n2013-07-08 100 340 ...\n2013-07-09 240 220 ...\n2013-07-10 120 500 ...\n2013-07-11 10 100 ...\n... ... ... ...\n```\nThe weights created from the function `generate_dollar_volume_weights` should be the following:\n```\n A B ...\n2013-07-08 0.126.. 0.194.. ...\n2013-07-09 0.759.. 0.377.. ...\n2013-07-10 0.075.. 0.285.. ...\n2013-07-11 0.037.. 0.142.. ...\n... ... ... ...\n```",
"_____no_output_____"
]
],
[
[
"def generate_dollar_volume_weights(close, volume):\n \"\"\"\n Generate dollar volume weights.\n\n Parameters\n ----------\n close : DataFrame\n Close price for each ticker and date\n volume : str\n Volume for each ticker and date\n\n Returns\n -------\n dollar_volume_weights : DataFrame\n The dollar volume weights for each ticker and date\n \"\"\"\n assert close.index.equals(volume.index)\n assert close.columns.equals(volume.columns)\n \n #TODO: Implement function\n dollar_volume = close * volume\n \n for index,_ in close.iterrows():\n # weights = close * volume / (sum of close * volume for all assets in the line)\n dollar_volume.loc[index] = dollar_volume.loc[index]/sum(dollar_volume.loc[index])\n\n return dollar_volume\n\nproject_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)",
"Tests Passed\n"
]
],
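[
[
"The reference solution above normalizes one date (row) at a time in a Python loop. As a sketch of an equivalent vectorized form (assuming the same `close` and `volume` DataFrames, aligned on dates and tickers):\n```\ndef generate_dollar_volume_weights_vectorized(close, volume):\n    # Dollar volume traded per ticker and date.\n    dollar_volume = close * volume\n    # Normalize each row (date) so the weights sum to one.\n    return dollar_volume.div(dollar_volume.sum(axis=1), axis=0)\n```\nBoth versions should agree; the vectorized one just pushes the per-date normalization down into pandas.",
"_____no_output_____"
]
],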
[
[
"### View Data\nLet's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.",
"_____no_output_____"
]
],
[
[
"index_weights = generate_dollar_volume_weights(close, volume)\nproject_helper.plot_weights(index_weights, 'Index Weights')",
"_____no_output_____"
]
],
[
[
"## Portfolio Weights\nNow that we have the index weights, let's choose the portfolio weights based on dividend. You would normally calculate the weights based on trailing dividend yield, but we'll simplify this by just calculating the total dividend yield over time.\n\nImplement `calculate_dividend_weights` to return the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's using dividend data instead.\nFor example, assume the following is `dividends` data:\n```\n Prices\n A B\n2013-07-08 0 0\n2013-07-09 0 1\n2013-07-10 0.5 0\n2013-07-11 0 0\n2013-07-12 2 0\n... ... ...\n```\nThe weights created from the function `calculate_dividend_weights` should be the following:\n```\n A B\n2013-07-08 NaN NaN\n2013-07-09 0 1\n2013-07-10 0.333.. 0.666..\n2013-07-11 0.333.. 0.666..\n2013-07-12 0.714.. 0.285..\n... ... ...\n```",
"_____no_output_____"
]
],
[
[
"def calculate_dividend_weights(dividends):\n \"\"\"\n Calculate dividend weights.\n\n Parameters\n ----------\n dividends : DataFrame\n Dividend for each stock and date\n\n Returns\n -------\n dividend_weights : DataFrame\n Weights for each stock and date\n \"\"\"\n #TODO: Implement function\n cumulated_dividend = dividends.cumsum()\n \n for index,_ in dividends.iterrows():\n # weights = dividends / (sum of dividends for all assets in the line)\n cumulated_dividend.loc[index] = cumulated_dividend.loc[index]/sum(cumulated_dividend.loc[index])\n\n return cumulated_dividend\n\nproject_tests.test_calculate_dividend_weights(calculate_dividend_weights)",
"Tests Passed\n"
]
],
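[
[
"As a quick sanity check, the small example from the markdown above can be reproduced directly (a sketch using the same hypothetical dividend numbers):\n```\nimport pandas as pd\n\ndividends = pd.DataFrame({'A': [0, 0, 0.5, 0, 2], 'B': [0, 1, 0, 0, 0]})\ncumulative = dividends.cumsum()\nweights = cumulative.div(cumulative.sum(axis=1), axis=0)\nprint(weights)\n# Row 0 is NaN (no dividends paid yet); the last row is A ~ 0.714, B ~ 0.286,\n# matching the expected output shown above.\n```",
"_____no_output_____"
]
],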
[
[
"### View Data\nJust like the index weights, let's generate the ETF weights and view them using a heatmap.",
"_____no_output_____"
]
],
[
[
"etf_weights = calculate_dividend_weights(dividends)\nproject_helper.plot_weights(etf_weights, 'ETF Weights')",
"_____no_output_____"
]
],
[
[
"## Returns\nImplement `generate_returns` to generate returns data for all the stocks and dates from price data. You might notice we're implementing returns and not log returns. Since we're not dealing with volatility, we don't have to use log returns.",
"_____no_output_____"
]
],
[
[
"def generate_returns(prices):\n \"\"\"\n Generate returns for ticker and date.\n\n Parameters\n ----------\n prices : DataFrame\n Price for each ticker and date\n\n Returns\n -------\n returns : Dataframe\n The returns for each ticker and date\n \"\"\"\n #TODO: Implement function\n\n return ((prices - prices.shift(1))/prices.shift(1))\n\nproject_tests.test_generate_returns(generate_returns)",
"Tests Passed\n"
]
],
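[
[
"For reference, pandas has this calculation built in; a minimal sketch of the equivalence (using the `close` DataFrame loaded earlier):\n```\n# (prices - prices.shift(1)) / prices.shift(1) is exactly pct_change():\nreturns_builtin = close.pct_change()\n```\nThe explicit `shift(1)` in the solution above just makes the definition of a simple return easier to see.",
"_____no_output_____"
]
],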
[
[
"### View Data\nLet's generate the closing returns using `generate_returns` and view them using a heatmap.",
"_____no_output_____"
]
],
[
[
"returns = generate_returns(close)\nproject_helper.plot_returns(returns, 'Close Returns')",
"_____no_output_____"
]
],
[
[
"## Weighted Returns\nWith the returns of each stock computed, we can use it to compute the returns for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using the returns and weights.",
"_____no_output_____"
]
],
[
[
"def generate_weighted_returns(returns, weights):\n \"\"\"\n Generate weighted returns.\n\n Parameters\n ----------\n returns : DataFrame\n Returns for each ticker and date\n weights : DataFrame\n Weights for each ticker and date\n\n Returns\n -------\n weighted_returns : DataFrame\n Weighted returns for each ticker and date\n \"\"\"\n assert returns.index.equals(weights.index)\n assert returns.columns.equals(weights.columns)\n \n #TODO: Implement function\n\n return (returns * weights)\n\nproject_tests.test_generate_weighted_returns(generate_weighted_returns)",
"Tests Passed\n"
]
],
[
[
"### View Data\nLet's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap.",
"_____no_output_____"
]
],
[
[
"index_weighted_returns = generate_weighted_returns(returns, index_weights)\netf_weighted_returns = generate_weighted_returns(returns, etf_weights)\nproject_helper.plot_returns(index_weighted_returns, 'Index Returns')\nproject_helper.plot_returns(etf_weighted_returns, 'ETF Returns')",
"_____no_output_____"
]
],
[
[
"## Cumulative Returns\nTo compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF comulative returns. Implement `calculate_cumulative_returns` to calculate the cumulative returns over time given the returns.",
"_____no_output_____"
]
],
[
[
"def calculate_cumulative_returns(returns):\n \"\"\"\n Calculate cumulative returns.\n\n Parameters\n ----------\n returns : DataFrame\n Returns for each ticker and date\n\n Returns\n -------\n cumulative_returns : Pandas Series\n Cumulative returns for each date\n \"\"\"\n #TODO: Implement function\n cumulative_returns = (returns.sum(axis=1) + 1).cumprod()\n \n return cumulative_returns\n\nproject_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)",
"Tests Passed\n"
]
],
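[
[
"A tiny numeric check of the compounding (hypothetical weighted daily returns):\n```\nimport pandas as pd\n\nweighted_returns = pd.DataFrame({'A': [0.01, 0.02], 'B': [0.01, -0.01]})\n# Sum the already-weighted returns across tickers, then compound over time.\ncumulative = (weighted_returns.sum(axis=1) + 1).cumprod()\nprint(cumulative.tolist())  # [1.02, 1.0302]\n```",
"_____no_output_____"
]
],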
[
[
"### View Data\nLet's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two.",
"_____no_output_____"
]
],
[
[
"index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)\netf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)\nproject_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')",
"_____no_output_____"
]
],
[
[
"## Tracking Error\nIn order to check the performance of the smart beta portfolio, we can calculate the annualized tracking error against the index. Implement `tracking_error` to return the tracking error between the ETF and benchmark.\n\nFor reference, we'll be using the following annualized tracking error function:\n$$ TE = \\sqrt{252} * SampleStdev(r_p - r_b) $$\n\nWhere $ r_p $ is the portfolio/ETF returns and $ r_b $ is the benchmark returns.\n\n_Note: When calculating the sample standard deviation, the delta degrees of freedom is 1, which is the also the default value._",
"_____no_output_____"
]
],
[
[
"def tracking_error(benchmark_returns_by_date, etf_returns_by_date):\n \"\"\"\n Calculate the tracking error.\n\n Parameters\n ----------\n benchmark_returns_by_date : Pandas Series\n The benchmark returns for each date\n etf_returns_by_date : Pandas Series\n The ETF returns for each date\n\n Returns\n -------\n tracking_error : float\n The tracking error\n \"\"\"\n assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index)\n \n #TODO: Implement function\n\n return (np.sqrt(252)*np.std(etf_returns_by_date - benchmark_returns_by_date, ddof=1))\n\nproject_tests.test_tracking_error(tracking_error)",
"Tests Passed\n"
]
],
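[
[
"To see the formula in action on a toy series (hypothetical numbers, not project data):\n```\nimport numpy as np\nimport pandas as pd\n\ndates = pd.date_range('2013-07-08', periods=4)\nbenchmark = pd.Series([0.010, -0.020, 0.005, 0.000], index=dates)\netf = pd.Series([0.012, -0.015, 0.001, 0.002], index=dates)\n\n# sqrt(252) annualizes the sample standard deviation of the daily active return.\nte = np.sqrt(252) * (etf - benchmark).std(ddof=1)\nprint(te)\n```\n`ddof=1` gives the sample standard deviation, matching the note above (and the pandas default).",
"_____no_output_____"
]
],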
[
[
"### View Data\nLet's generate the tracking error using `tracking_error`.",
"_____no_output_____"
]
],
[
[
"smart_beta_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(etf_weighted_returns, 1))\nprint('Smart Beta Tracking Error: {}'.format(smart_beta_tracking_error))",
"Smart Beta Tracking Error: 0.09940101796032481\n"
]
],
[
[
"# Part 2: Portfolio Optimization\n\nNow, let's create a second portfolio. We'll still reuse the market cap weighted index, but this will be independent of the dividend-weighted portfolio that we created in part 1.\n\nWe want to both minimize the portfolio variance and also want to closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index.\n\n$Minimize \\left [ \\sigma^2_p + \\lambda \\sqrt{\\sum_{1}^{m}(weight_i - indexWeight_i)^2} \\right ]$ where $m$ is the number of stocks in the portfolio, and $\\lambda$ is a scaling factor that you can choose.\n\nWhy are we doing this? One way that investors evaluate a fund is by how well it tracks its index. The fund is still expected to deviate from the index within a certain range in order to improve fund performance. A way for a fund to track the performance of its benchmark is by keeping its asset weights similar to the weights of the index. We’d expect that if the fund has the same stocks as the benchmark, and also the same weights for each stock as the benchmark, the fund would yield about the same returns as the benchmark. By minimizing a linear combination of both the portfolio risk and distance between portfolio and benchmark weights, we attempt to balance the desire to minimize portfolio variance with the goal of tracking the index.\n\n\n## Covariance\nImplement `get_covariance_returns` to calculate the covariance of the `returns`. We'll use this to calculate the portfolio variance.\n\nIf we have $m$ stock series, the covariance matrix is an $m \\times m$ matrix containing the covariance between each pair of stocks. We can use [`Numpy.cov`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) to get the covariance. We give it a 2D array in which each row is a stock series, and each column is an observation at the same period of time. For any `NaN` values, you can replace them with zeros using the [`DataFrame.fillna`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html) function.\n\nThe covariance matrix $\\mathbf{P} = \n\\begin{bmatrix}\n\\sigma^2_{1,1} & ... & \\sigma^2_{1,m} \\\\ \n... & ... & ...\\\\\n\\sigma_{m,1} & ... & \\sigma^2_{m,m} \\\\\n\\end{bmatrix}$",
"_____no_output_____"
]
],
[
[
"def get_covariance_returns(returns):\n \"\"\"\n Calculate covariance matrices.\n\n Parameters\n ----------\n returns : DataFrame\n Returns for each ticker and date\n\n Returns\n -------\n returns_covariance : 2 dimensional Ndarray\n The covariance of the returns\n \"\"\"\n #TODO: Implement function\n \n return (np.cov(returns.T.fillna(0)))\n\nproject_tests.test_get_covariance_returns(get_covariance_returns)",
"Tests Passed\n"
]
],
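[
[
"A small usage sketch (hypothetical numbers) showing why the solution transposes the returns: `np.cov` expects each row to be one variable (one stock's return series) and each column one observation:\n```\nimport numpy as np\n\n# Rows = stocks, columns = dates.\nstock_returns = np.array([[0.01, -0.02, 0.005],\n                          [0.02, 0.01, -0.010]])\nP = np.cov(stock_returns)\nprint(P.shape)  # (2, 2)\n```\nSince our `returns` DataFrame stores dates as rows and tickers as columns, it is transposed (`returns.T`) before the call.",
"_____no_output_____"
]
],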
[
[
"### View Data\nLet's look at the covariance generated from `get_covariance_returns`.",
"_____no_output_____"
]
],
[
[
"covariance_returns = get_covariance_returns(returns)\ncovariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns)\n\ncovariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns))))\ncovariance_returns_correlation = pd.DataFrame(\n covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation),\n covariance_returns.index,\n covariance_returns.columns)\n\nproject_helper.plot_covariance_returns_correlation(\n covariance_returns_correlation,\n 'Covariance Returns Correlation Matrix')",
"_____no_output_____"
]
],
[
[
"### portfolio variance\nWe can write the portfolio variance $\\sigma^2_p = \\mathbf{x^T} \\mathbf{P} \\mathbf{x}$\n\nRecall that the $\\mathbf{x^T} \\mathbf{P} \\mathbf{x}$ is called the quadratic form.\nWe can use the cvxpy function `quad_form(x,P)` to get the quadratic form.\n\n### Distance from index weights\nWe want portfolio weights that track the index closely. So we want to minimize the distance between them.\nRecall from the Pythagorean theorem that you can get the distance between two points in an x,y plane by adding the square of the x and y distances and taking the square root. Extending this to any number of dimensions is called the L2 norm. So: $\\sqrt{\\sum_{1}^{n}(weight_i - indexWeight_i)^2}$ Can also be written as $\\left \\| \\mathbf{x} - \\mathbf{index} \\right \\|_2$. There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm)\n`norm(x, p=2, axis=None)`. The default is already set to find an L2 norm, so you would pass in one argument, which is the difference between your portfolio weights and the index weights.\n\n### objective function\nWe want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights.\nWe also want to choose a `scale` constant, which is $\\lambda$ in the expression. \n\n$\\mathbf{x^T} \\mathbf{P} \\mathbf{x} + \\lambda \\left \\| \\mathbf{x} - \\mathbf{index} \\right \\|_2$\n\n\nThis lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\\lambda$).\n\nWe can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function?\n\n",
"_____no_output_____"
],
[
"### constraints\nWe can also define our constraints in a list. For example, you'd want the weights to sum to one. So $\\sum_{1}^{n}x = 1$. You may also need to go long only, which means no shorting, so no negative weights. So $x_i >0 $ for all $i$. you could save a variable as `[x >= 0, sum(x) == 1]`, where x was created using `cvx.Variable()`.\n\n### optimization\nSo now that we have our objective function and constraints, we can solve for the values of $\\mathbf{x}$.\ncvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object.\n\nThe `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio.\n\nIt also updates the vector $\\mathbf{x}$.\n\nWe can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value`",
"_____no_output_____"
]
],
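[
[
"Putting those pieces together on a toy two-asset instance before the exercise (all numbers here are hypothetical):\n```\nimport cvxpy as cvx\nimport numpy as np\n\nP = np.array([[0.10, 0.02],\n              [0.02, 0.05]])        # toy covariance matrix (PSD)\nindex_weights_toy = np.array([0.6, 0.4])\nscale = 2.0\n\nx = cvx.Variable(2)\nobjective = cvx.Minimize(cvx.quad_form(x, P) + scale * cvx.norm(x - index_weights_toy))\nconstraints = [x >= 0, cvx.sum(x) == 1]\nmin_value = cvx.Problem(objective, constraints).solve()\nprint(x.value)  # optimal long-only weights summing to one\n```",
"_____no_output_____"
]
],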
[
[
"import cvxpy as cvx\n\ndef get_optimal_weights(covariance_returns, index_weights, scale=2.0):\n \"\"\"\n Find the optimal weights.\n\n Parameters\n ----------\n covariance_returns : 2 dimensional Ndarray\n The covariance of the returns\n index_weights : Pandas Series\n Index weights for all tickers at a period in time\n scale : int\n The penalty factor for weights the deviate from the index \n Returns\n -------\n x : 1 dimensional Ndarray\n The solution for x\n \"\"\"\n assert len(covariance_returns.shape) == 2\n assert len(index_weights.shape) == 1\n assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0]\n\n #TODO: Implement function\n \n # number of stocks m is number of rows of returns, and also number of index weights\n m = covariance_returns.shape[0]\n \n # x variables (to be found with optimization)\n x = cvx.Variable(m)\n \n # portfolio variance, in quadratic form\n portfolio_variance = cvx.quad_form(x, covariance_returns)\n \n # euclidean distance (L2 norm) between portfolio and index weights\n distance_to_index = cvx.norm(x - index_weights)\n \n # objective function\n objective = cvx.Minimize(portfolio_variance + scale * distance_to_index)\n \n # constraints\n constraints = [x >= 0, sum(x) == 1]\n\n # use cvxpy to solve the objective\n problem = cvx.Problem(objective, constraints).solve()\n \n # retrieve the weights of the optimized portfolio\n x_values = x.value\n \n return x_values\n\nproject_tests.test_get_optimal_weights(get_optimal_weights)",
"Tests Passed\n"
]
],
[
[
"## Optimized Portfolio\nUsing the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalanceing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights. We'll go with the average weights of the index over time.",
"_____no_output_____"
]
],
[
[
"raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, index_weights.iloc[-1])\noptimal_single_rebalance_etf_weights = pd.DataFrame(\n np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)),\n returns.index,\n returns.columns)",
"_____no_output_____"
]
],
[
[
"With our ETF weights built, let's compare it to the index. Run the next cell to calculate the ETF returns and compare it to the index returns.",
"_____no_output_____"
]
],
[
[
"optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights)\noptim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)\nproject_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')\n\noptim_etf_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(optim_etf_returns, 1))\nprint('Optimized ETF Tracking Error: {}'.format(optim_etf_tracking_error))",
"_____no_output_____"
]
],
[
[
"## Rebalance Portfolio Over Time\nThe single optimized ETF portfolio used the same weights for the entire history. This might not be the optimal weights for the entire period. Let's rebalance the portfolio over the same period instead of using the same weights. Implement `rebalance_portfolio` to rebalance a portfolio.\n\nReblance the portfolio every n number of days, which is given as `shift_size`. When rebalancing, you should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, compute the optoimal weights using `get_optimal_weights` and `get_covariance_returns`.",
"_____no_output_____"
]
],
[
[
"def rebalance_portfolio(returns, index_weights, shift_size, chunk_size):\n \"\"\"\n Get weights for each rebalancing of the portfolio.\n\n Parameters\n ----------\n returns : DataFrame\n Returns for each ticker and date\n index_weights : DataFrame\n Index weight for each ticker and date\n shift_size : int\n The number of days between each rebalance\n chunk_size : int\n The number of days to look in the past for rebalancing\n\n Returns\n -------\n all_rebalance_weights : list of Ndarrays\n The ETF weights for each point they are rebalanced\n \"\"\"\n assert returns.index.equals(index_weights.index)\n assert returns.columns.equals(index_weights.columns)\n assert shift_size > 0\n assert chunk_size >= 0\n \n #TODO: Implement function\n\n # List of all rebalanced weights\n rebalance_portfolio_weights = []\n\n for index in range(chunk_size, returns.shape[0], shift_size):\n # calculates the chunk of returns\n chunk = returns.iloc[index - chunk_size : index]\n # calculates covariance returns\n covariance_returns = get_covariance_returns(chunk)\n # calculates optimal weights\n raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns, index_weights.iloc[index - 1])\n # append the results\n rebalance_portfolio_weights.append(raw_optimal_single_rebalance_etf_weights)\n \n return rebalance_portfolio_weights\n\nproject_tests.test_rebalance_portfolio(rebalance_portfolio)",
"Tests Passed\n"
]
],
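[
[
"A quick sketch of which look-back windows the loop above visits (hypothetical sizes):\n```\nn_rows, chunk_size, shift_size = 10, 4, 2\nwindows = [(i - chunk_size, i) for i in range(chunk_size, n_rows, shift_size)]\nprint(windows)  # [(0, 4), (2, 6), (4, 8)]\n```\nEach rebalance uses the previous `chunk_size` rows of returns and the index weights from the last day of that window.",
"_____no_output_____"
]
],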
[
[
"Run the following cell to rebalance the portfolio using `rebalance_portfolio`.",
"_____no_output_____"
]
],
[
[
"chunk_size = 250\nshift_size = 5\nall_rebalance_weights = rebalance_portfolio(returns, index_weights, shift_size, chunk_size)",
"_____no_output_____"
]
],
[
[
"## Portfolio Turnover\nWith the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_portfolio_turnover` to calculate the annual portfolio turnover. We'll be using the formulas used in the classroom:\n\n$ AnnualizedTurnover =\\frac{SumTotalTurnover}{NumberOfRebalanceEvents} * NumberofRebalanceEventsPerYear $\n\n$ SumTotalTurnover =\\sum_{t,n}{\\left | x_{t,n} - x_{t+1,n} \\right |} $ Where $ x_{t,n} $ are the weights at time $ t $ for equity $ n $.\n\n$ SumTotalTurnover $ is just a different way of writing $ \\sum \\left | x_{t_1,n} - x_{t_2,n} \\right | $",
"_____no_output_____"
]
],
[
[
"def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252):\n \"\"\"\n Calculage portfolio turnover.\n\n Parameters\n ----------\n all_rebalance_weights : list of Ndarrays\n The ETF weights for each point they are rebalanced\n shift_size : int\n The number of days between each rebalance\n rebalance_count : int\n Number of times the portfolio was rebalanced\n n_trading_days_in_year: int\n Number of trading days in a year\n\n Returns\n -------\n portfolio_turnover : float\n The portfolio turnover\n \"\"\"\n assert shift_size > 0\n assert rebalance_count > 0\n \n #TODO: Implement function\n portfolio_turnover = 0\n \n for index in range(1, len(all_rebalance_weights)):\n portfolio_turnover += sum(np.abs(all_rebalance_weights[index] - all_rebalance_weights[index-1]))\n \n # annualized turnover calculation\n annualized_portfolio_turnover = portfolio_turnover*(n_trading_days_in_year/shift_size)/rebalance_count\n \n return annualized_portfolio_turnover\n\nproject_tests.test_get_portfolio_turnover(get_portfolio_turnover)",
"Tests Passed\n"
]
],
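[
[
"A worked example of the turnover formulas with hypothetical weights for a single rebalance event:\n```\nimport numpy as np\n\nw_before = np.array([0.5, 0.3, 0.2])\nw_after = np.array([0.4, 0.4, 0.2])\n\nsum_total_turnover = np.abs(w_after - w_before).sum()  # 0.2\n# One observed rebalance event, rebalancing every 5 trading days:\nannualized_turnover = sum_total_turnover / 1 * (252 / 5)  # 10.08\nprint(annualized_turnover)\n```",
"_____no_output_____"
]
],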
[
[
"Run the following cell to get the portfolio turnover from `get_portfolio turnover`.",
"_____no_output_____"
]
],
[
[
"print(get_portfolio_turnover(all_rebalance_weights, shift_size, len(all_rebalance_weights) - 1))",
"16.72683266050277\n"
]
],
[
[
"That's it! You've built a smart beta portfolio in part 1 and did portfolio optimization in part 2. You can now submit your project.",
"_____no_output_____"
],
[
"## Submission\nNow that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
d0d4de662384eb07db6a5021420b8b0b26144776 | 375,763 | ipynb | Jupyter Notebook | copa_roberta_more_data.ipynb | LiGhtime/NLPProject | 4a7226bba82a62c21cd9525dfda6405d1dd162ce | [
"MIT"
] | null | null | null | copa_roberta_more_data.ipynb | LiGhtime/NLPProject | 4a7226bba82a62c21cd9525dfda6405d1dd162ce | [
"MIT"
] | null | null | null | copa_roberta_more_data.ipynb | LiGhtime/NLPProject | 4a7226bba82a62c21cd9525dfda6405d1dd162ce | [
"MIT"
] | null | null | null | 50.971649 | 56,934 | 0.609134 | [
[
[
"import pandas as pd\nimport numpy as np\n# from sklearn.model_selection import train_test_split\nimport torch\nfrom torch import nn\nfrom torch import optim\n# import json\n# from torch.utils.data import Dataset, DataLoader\n# import transformers\nfrom transformers import RobertaModel, RobertaTokenizer, RobertaForMultipleChoice\nfrom torch import cuda\nfrom datetime import datetime\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"device = 'cuda' if cuda.is_available() else 'cpu'\nprint(device)",
"cuda\n"
],
[
"# load data\ntest_raw_data = pd.read_xml('data/COPA-resources/datasets/copa-test.xml')\ndev_raw_data = pd.read_xml('data/COPA-resources/datasets/copa-dev.xml') # train-test-split 400-100\ndev_raw_data.head(10)",
"_____no_output_____"
],
[
"# test\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n# test_sequence = \"{\" + \"effect\" + \"}\" + \"I ran the ice cube under warm water.\"\ntest_sequence = \"{\"+ test_raw_data.iloc[28]['asks-for'] + \"}\" + test_raw_data.iloc[28]['p']\nprint(\"test_sequence is: \", test_sequence)\nprint(tokenizer(test_sequence))\nprint(tokenizer.tokenize(test_sequence))\n# test 2\ntest_sequence = \"{\"+ test_raw_data.iloc[1]['asks-for'] + \"}\" + test_raw_data.iloc[1]['p']\nprint(\"test_sequence is: \", test_sequence)\nprint(tokenizer(test_sequence))\nprint(tokenizer.tokenize(test_sequence))\n\nprint(test_raw_data.shape[0])",
"test_sequence is: {cause}The frozen food thawed.\n{'input_ids': [0, 45152, 27037, 24303, 133, 9214, 689, 3553, 32211, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n['{', 'cause', '}', 'The', 'Ġfrozen', 'Ġfood', 'Ġth', 'awed', '.']\ntest_sequence is: {effect}I emptied my pockets.\n{'input_ids': [0, 45152, 26715, 24303, 100, 35371, 127, 12189, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n['{', 'effect', '}', 'I', 'Ġemptied', 'Ġmy', 'Ġpockets', '.']\n500\n"
],
[
"def load_data(rawdata):\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n \n # for i in range(0, rawdata.shape[0]):\n for i in range(2, 5):\n prompt = rawdata.iloc[i]['asks-for'] + \".\" + rawdata.iloc[i]['p']\n choice0 = rawdata.iloc[i]['a1']\n choice1 = rawdata.iloc[i]['a2']\n label = torch.tensor(rawdata.iloc[i]['most-plausible-alternative'] - 1)\n # label = torch.tensor(rawdata.iloc[i]['label']).unsqueeze(0).to(device)\n\n encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True)\n print(\"encoding['input_ids']: \", encoding['input_ids'])\n print(\"encoding['input_ids'] with size of : \", encoding['input_ids'].size())\n print(\"encoding['attention_mask']: \", encoding['attention_mask'])\n print(\"label: \", label)\n return encoding",
"_____no_output_____"
],
[
"print(dev_raw_data.shape[0])\nprint(test_raw_data.shape[0])",
"500\n500\n"
],
[
"# tokenize data tests\ndev_data = load_data(dev_raw_data)\n# print(f'Training data loaded (length {len(train_data)})')\n# dev_data = load_data('data/dev.jsonl')\n# print(f'Dev data loaded (length {len(dev_data)})')\n# test_data = load_data('data/test.jsonl')\n# print(f'Test data loaded (length {len(test_data)})')",
"encoding['input_ids']: tensor([[ 0, 27037, 4, 133, 390, 1145, 13, 3895, 4, 2,\n 2, 133, 16381, 14015, 11, 10, 92, 2259, 4, 2,\n 1],\n [ 0, 27037, 4, 133, 390, 1145, 13, 3895, 4, 2,\n 2, 1213, 770, 7, 2916, 62, 19, 349, 97, 4,\n 2]])\nencoding['input_ids'] with size of : torch.Size([2, 21])\nencoding['attention_mask']: tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\nlabel: tensor(1)\nencoding['input_ids']: tensor([[ 0, 27037, 4, 133, 7449, 5328, 13344, 4, 2, 2,\n 133, 1914, 6126, 239, 3971, 4, 2, 1, 1],\n [ 0, 27037, 4, 133, 7449, 5328, 13344, 4, 2, 2,\n 2515, 1904, 7, 422, 552, 5, 4105, 4, 2]])\nencoding['input_ids'] with size of : torch.Size([2, 19])\nencoding['attention_mask']: tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\nlabel: tensor(0)\nencoding['input_ids']: tensor([[ 0, 27037, 4, 133, 3958, 9, 5, 537, 20119, 639,\n 5, 16433, 4, 2, 2, 243, 21, 10, 2755, 537,\n 4, 2],\n [ 0, 27037, 4, 133, 3958, 9, 5, 537, 20119, 639,\n 5, 16433, 4, 2, 2, 243, 21, 10, 4115, 537,\n 4, 2]])\nencoding['input_ids'] with size of : torch.Size([2, 22])\nencoding['attention_mask']: tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\nlabel: tensor(0)\n"
]
],
[
[
"## Model Construction",
"_____no_output_____"
]
],
[
[
"# Model_3, use only the very last hidden layer from Roberta.\nfrom torch import nn\nfrom transformers import RobertaConfig, RobertaModel\n\nclass OurRobertaCOPA(torch.nn.Module):\n def __init__(self):\n super(OurRobertaCOPA, self).__init__()\n # self.configuration = RobertaConfig()\n # self.tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n # self.l1 = RobertaModel(self.configuration)\n self.l1 = RobertaModel.from_pretrained(\"roberta-base\")\n self.l1.requires_grad = True\n self.softmax = nn.Softmax(dim=0)\n self.pre_classifier = torch.nn.Linear(768, 512)\n self.dropout = torch.nn.Dropout(0.3)\n # self.classifier = torch.nn.Linear(768, 5)\n # hidden_dim=32 for later trials.\n # self.lstm = nn.LSTM(768, 32, 1, bias=False)\n self.output_layer = nn.Linear(512, 2)\n\n def forward(self, sequence_1, sequence_2):\n # Two input here\n token_1 = tokenizer(sequence_1)\n token_2 = tokenizer(sequence_2)\n output_1 = self.l1(input_ids=torch.tensor(token_1[\"input_ids\"]).unsqueeze(0), attention_mask=torch.tensor(token_1[\"attention_mask\"]).unsqueeze(0))[0]\n output_2 = self.l1(input_ids=torch.tensor(token_2[\"input_ids\"]).unsqueeze(0), attention_mask=torch.tensor(token_2[\"attention_mask\"]).unsqueeze(0))[0]\n # RobertaModel(RobertaConfig())\n\n # _, (hidden_rep_1, _) = self.lstm(output_1.unsqueeze(0))\n # _, (hidden_rep_2, _) = self.lstm(output_2.unsqueeze(0))\n # _, (hidden_rep_1, _) = self.lstm(output_1)\n # _, (hidden_rep_2, _) = self.lstm(output_2)\n\n hidden_rep_1 = torch.nn.ReLU()(self.pre_classifier(output_1[0])).squeeze(0)\n hidden_rep_2 = torch.nn.ReLU()(self.pre_classifier(output_2[0])).squeeze(0)\n pooler_1 = hidden_rep_1[:, 0]\n pooler_2 = hidden_rep_2[:, 0]\n # hidden_rep_1 = self.pre_classifier(output_1[0]).squeeze(0)\n # hidden_rep_2 = self.pre_classifier(output_2[0]).squeeze(0)\n # print(\"-------hidden_rep_1:\")\n # print(hidden_rep_1)\n # print(hidden_rep_1.size())\n # print(\"-------hidden_rep_2:\")\n # print(hidden_rep_2)\n # print(hidden_rep_2.size())\n \n # hidden_rep = torch.cat((hidden_rep_1.unsqueeze(1), hidden_rep_2.unsqueeze(1)), 1)\n # hidden_rep = self.dropout(torch.cat((hidden_rep_1, hidden_rep_2), 0))\n hidden_rep = self.dropout(torch.cat((pooler_1, pooler_2), 0))\n\n print(\"-------hidden_rep:\")\n # print(hidden_rep)\n print(hidden_rep.size())\n\n output = self.output_layer(hidden_rep.unsqueeze(0))\n print(\"-------output:\")\n # print(output)\n print(output.size())\n print(\"--------------\")\n\n output_squezzed = output.squeeze(0).squeeze(0)\n print(\"-------output_squezzed:\")\n print(output_squezzed)\n print(output_squezzed.size())\n print(\"--------------\")\n \n # y_hat = softmax(output_squezzed)\n # y_sum = torch.sum(y_hat, 0)\n # col1= torch.sum(y_hat, 0)[0]\n # col2 = torch.sum(y_hat, 0)[1]\n # y_result = torch.tensor(torch.argmax(y_sum)).type(torch.FloatTensor)\n # y_result = torch.tensor(y_sum)\n \n return output_squezzed",
"_____no_output_____"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"# Initialization\ntokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n# model = OurRobertaCOPA()\nmodel = RobertaForMultipleChoice.from_pretrained('roberta-base')\nmodel.to(device)",
"Some weights of the model checkpoint at roberta-base were not used when initializing RobertaForMultipleChoice: ['lm_head.layer_norm.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.weight', 'lm_head.bias', 'lm_head.decoder.weight', 'lm_head.dense.bias']\n- This IS expected if you are initializing RobertaForMultipleChoice from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaForMultipleChoice from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\nSome weights of RobertaForMultipleChoice were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.bias', 'classifier.weight']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
],
[
"ce = nn.CrossEntropyLoss()\nsoftmax = nn.Softmax(dim=0)\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\nscheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)\n\nepochs = 52\nper_num_epoch = 1\n\n# train_acc = np.zeros(epochs)\ntrain_loss_by_epoch = np.zeros(epochs)\ndev_acc = np.zeros(epochs)\ndev_loss_by_epoch = np.zeros(epochs)\n\nstart_time = datetime.now()\n\nfor j in range(epochs):\n if j % per_num_epoch == 0:\n print('--------------Epoch: ' + str(j+1) + '--------------')\n \n if j % per_num_epoch == 0:\n print(f'Training for epoch {j + 1}.......')\n \n av_train_loss = 0\n # print(\"av_train_loss_original: \", av_train_loss)\n model.train()\n for i in range(0, dev_raw_data.shape[0] - 100):\n # print(\"av_train_loss_track: \", av_train_loss)\n prompt = dev_raw_data.iloc[i]['asks-for'] + \". \" + dev_raw_data.iloc[i]['p']\n choice0 = dev_raw_data.iloc[i]['a1']\n choice1 = dev_raw_data.iloc[i]['a2']\n label = torch.tensor(dev_raw_data.iloc[i]['most-plausible-alternative'] - 1).unsqueeze(0).to(device)\n # print(\"label is: \", label)\n\n encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True).to(device)\n # encoding = {(prompt+choice0), (prompt+choice1)}\n # outputs = model(input_ids=encoding['input_ids'].unsqueeze(0), attention_mask=encoding['attention_mask'].unsqueeze(0), labels=label)\n outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=label)\n # print(\"outputs: \", outputs)\n\n train_loss = outputs.loss\n train_logits = outputs.logits\n av_train_loss += train_loss\n\n if i == 0:\n print(\"train_loss: \", train_loss)\n print(\"train_logits: \", train_logits)\n print(\"label: \", label)\n if i == 1:\n print(\"train_loss: \", train_loss)\n print(\"train_logits: \", train_logits)\n print(\"label: \", label)\n\n train_loss.backward()\n\n optimizer.step()\n \n optimizer.zero_grad()\n\n train_loss_by_epoch[j] = av_train_loss / (dev_raw_data.shape[0] - 100)\n print(\"av_train_loss: \", train_loss_by_epoch[j])\n\n # validation\n # if (j + 1) % per_num_epoch == 0:\n # print(f'.......Validating for epoch {j + 1}')\n if (j) % per_num_epoch == 0:\n print(f'.......Validating for epoch {j + 1}')\n av_dev_loss = 0\n # model.eval()\n with torch.no_grad():\n for i in range(dev_raw_data.shape[0] - 99, dev_raw_data.shape[0]):\n # print(\"av_dev_loss_track: \", av_dev_loss)\n prompt_val = dev_raw_data.iloc[i]['asks-for'] + \". 
\" + dev_raw_data.iloc[i]['p']\n choice0_val = dev_raw_data.iloc[i]['a1']\n choice1_val = dev_raw_data.iloc[i]['a2']\n label_val = torch.tensor(dev_raw_data.iloc[i]['most-plausible-alternative'] - 1).unsqueeze(0).to(device)\n\n encoding_val = tokenizer([prompt_val, prompt_val], [choice0_val, choice1_val], return_tensors='pt', padding=True).to(device)\n # outputs = model(input_ids=encoding['input_ids'].unsqueeze(0), attention_mask=encoding['attention_mask'].unsqueeze(0), labels=label)\n outputs_val = model(**{k: v.unsqueeze(0) for k,v in encoding_val.items()}, labels=label_val)\n \n dev_loss = outputs_val.loss\n dev_logits = outputs_val.logits\n av_dev_loss += dev_loss\n \n if i == dev_raw_data.shape[0] - 99:\n print(\"dev_loss: \", dev_loss)\n print(\"dev_logits: \", dev_logits)\n print(\"label: \", label_val)\n if i == dev_raw_data.shape[0] - 1:\n print(\"dev_loss: \", dev_loss)\n print(\"dev_logits: \", dev_logits)\n print(\"label: \", label_val)\n\n #calculate accuracy\n y_pred = 1 if outputs_val.logits[0][1] > outputs_val.logits[0][0] else 0\n y_pred = torch.tensor(y_pred).unsqueeze(0).to(device)\n # print(\"y_pred: \", y_pred)\n # print(\"label: \", label)\n # print(\"y_pred =? label: \", y_pred == label)\n if y_pred == label_val:\n dev_acc[j] += 1\n \n dev_acc[j] /= 100\n print(\"dev_acc[j]: \", dev_acc[j])\n dev_loss_by_epoch[j] = av_dev_loss / 100\n \n # learning rate decay\n # if j == 5:\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n # elif j == 15:\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n # elif j == 20:\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\n # elif j == 40:\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-6)\n # elif j == 50:\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-7) \n scheduler.step()\n\nend_time = datetime.now()\nprint(f'Training completed in {str(end_time - start_time)}')",
"--------------Epoch: 1--------------\nTraining for epoch 1.......\ntrain_loss: tensor(1.0145, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.2321, -6.6679]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(2.1292, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.3377, -5.3351]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 1.093465805053711\n.......Validating for epoch 1\ndev_loss: tensor(0.2771, device='cuda:0')\ndev_logits: tensor([[-4.4653, -3.3236]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_loss: tensor(0.9445, device='cuda:0')\ndev_logits: tensor([[-5.1815, -5.6335]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 54.0\n--------------Epoch: 2--------------\nTraining for epoch 2.......\ntrain_loss: tensor(0.7258, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-4.7509, -4.6866]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.0591, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-4.9796, -7.7781]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 1.1721071004867554\n.......Validating for epoch 2\ndev_loss: tensor(2.0170, device='cuda:0')\ndev_logits: tensor([[-4.6715, -6.5457]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 
11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_loss: tensor(0.0664, device='cuda:0')\ndev_logits: tensor([[-6.1951, -3.5165]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 50.0\n--------------Epoch: 3--------------\nTraining for epoch 3.......\ntrain_loss: tensor(2.6867, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.4392, -4.8230]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.0255, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-4.3327, -7.9884]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 1.2429717779159546\n.......Validating for epoch 3\ndev_loss: tensor(1.0223, device='cuda:0')\ndev_logits: tensor([[-1.0750, -1.6513]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 
35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_loss: tensor(0.3926, device='cuda:0')\ndev_logits: tensor([[-2.3238, -1.5917]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 52.0\n--------------Epoch: 4--------------\nTraining for epoch 4.......\ntrain_loss: tensor(3.3987, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-3.5012, -0.1366]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.3746, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[ 0.1935, -0.5953]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 1.2903330326080322\n.......Validating for epoch 4\ndev_loss: tensor(1.0326, device='cuda:0')\ndev_logits: tensor([[-5.1778, -5.7702]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_loss: tensor(0.0530, device='cuda:0')\ndev_logits: tensor([[-6.6206, -3.7099]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 48.0\n--------------Epoch: 5--------------\nTraining for epoch 5.......\ntrain_loss: tensor(3.7577, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.0622, -3.3281]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.6622, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-2.9537, -3.0167]], device='cuda:0', 
grad_fn=<ViewBackward0>)
label: tensor([0], device='cuda:0')
av_train_loss: 1.2250010967254639

[Raw training/validation printout for epochs 5-34 condensed: the per-example running dev_acc[j] counter and the sampled per-batch train_loss/dev_loss/logit tensors are collapsed to one summary row per epoch; only the recoverable per-epoch figures are kept.]

epoch | av_train_loss | final dev_acc (correct dev examples)
------+---------------+--------------------------------------
    5 |        1.2250 | 43
    6 |        1.1582 | 43
    7 |        1.1739 | 47
    8 |        1.1468 | 49
    9 |        1.0408 | 49
   10 |        0.9896 | 48
   11 |        0.9895 | 48
   12 |        0.9459 | 51
   13 |        0.9292 | 51
   14 |        0.9405 | 54
   15 |        0.9213 | 46
   16 |        0.9412 | 45
   17 |        0.8866 | 49
   18 |        0.8543 | 44
   19 |        0.9035 | 37
   20 |        0.8388 | 54
   21 |        0.8593 | 57
   22 |        0.8221 | 51
   23 |        0.8045 | 47
   24 |        0.8161 | 54
   25 |        0.8416 | 47
   26 |        0.8107 | 40
   27 |        0.7703 | 48
   28 |        0.7574 | 59
   29 |        0.8136 | 49
   30 |        0.8075 | 50
   31 |        0.7987 | 47
   32 |        0.7507 | 53
   33 |        0.7685 | 55
   34 | (printout truncated mid-training)
tensor([[-7.0712, -7.2581]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7533155679702759\n.......Validating for epoch 34\ndev_loss: tensor(1.2560, device='cuda:0')\ndev_logits: tensor([[-7.0244, -7.9452]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_loss: tensor(1.3850, device='cuda:0')\ndev_logits: tensor([[-6.7306, -7.8275]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 42.0\n--------------Epoch: 35--------------\nTraining for epoch 35.......\ntrain_loss: tensor(0.9977, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.6662, -7.1285]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.7145, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.9946, -6.9523]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7745379209518433\n.......Validating for epoch 35\ndev_loss: tensor(0.6684, device='cuda:0')\ndev_logits: tensor([[-7.4275, -7.3775]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 
19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_loss: tensor(0.7002, device='cuda:0')\ndev_logits: tensor([[-7.5611, -7.5751]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 48.0\n--------------Epoch: 36--------------\nTraining for epoch 36.......\ntrain_loss: tensor(1.5071, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.9582, -5.7016]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.2667, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.7649, -6.8292]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7636578679084778\n.......Validating for epoch 36\ndev_loss: tensor(0.8413, device='cuda:0')\ndev_logits: tensor([[-7.6554, -7.9326]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 
37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_loss: tensor(0.8966, device='cuda:0')\ndev_logits: tensor([[-7.2578, -7.6301]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 38.0\n--------------Epoch: 37--------------\nTraining for epoch 37.......\ntrain_loss: tensor(0.4881, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.4635, -7.9269]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.0642, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.0130, -7.3720]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7201663851737976\n.......Validating for epoch 37\ndev_loss: tensor(0.2634, device='cuda:0')\ndev_logits: tensor([[-7.9715, -6.7718]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_loss: tensor(0.5999, device='cuda:0')\ndev_logits: tensor([[-7.8534, -7.6574]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 46.0\n--------------Epoch: 38--------------\nTraining for epoch 38.......\ntrain_loss: tensor(0.8941, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.7498, -7.3816]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.5134, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.4279, -6.8270]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7546659111976624\n.......Validating for epoch 38\ndev_loss: tensor(1.3306, device='cuda:0')\ndev_logits: tensor([[-6.8725, -7.8961]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 
2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_acc[j]: 54.0\ndev_acc[j]: 54.0\ndev_acc[j]: 54.0\ndev_acc[j]: 55.0\ndev_acc[j]: 55.0\ndev_acc[j]: 56.0\ndev_acc[j]: 56.0\ndev_acc[j]: 57.0\ndev_acc[j]: 58.0\ndev_acc[j]: 58.0\ndev_loss: tensor(0.8026, device='cuda:0')\ndev_logits: tensor([[-7.3090, -7.5170]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 58.0\n--------------Epoch: 39--------------\nTraining for epoch 39.......\ntrain_loss: tensor(1.2053, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.9321, -7.0829]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.5341, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.2668, -7.6150]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7398672103881836\n.......Validating for epoch 39\ndev_loss: tensor(0.2580, device='cuda:0')\ndev_logits: tensor([[-7.6552, -6.4321]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 
35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_loss: tensor(0.6073, device='cuda:0')\ndev_logits: tensor([[-7.8631, -7.6834]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 51.0\n--------------Epoch: 40--------------\nTraining for epoch 40.......\ntrain_loss: tensor(1.4637, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.4488, -7.2482]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.8185, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.3017, -7.0651]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7582509517669678\n.......Validating for epoch 40\ndev_loss: tensor(0.3914, device='cuda:0')\ndev_logits: tensor([[-7.8587, -7.1226]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_acc[j]: 55.0\ndev_acc[j]: 55.0\ndev_acc[j]: 56.0\ndev_acc[j]: 56.0\ndev_acc[j]: 57.0\ndev_acc[j]: 58.0\ndev_acc[j]: 59.0\ndev_acc[j]: 60.0\ndev_loss: tensor(0.6218, device='cuda:0')\ndev_logits: tensor([[-7.5025, -7.3544]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 61.0\n--------------Epoch: 41--------------\nTraining for epoch 41.......\ntrain_loss: tensor(0.5279, 
device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.1667, -7.5299]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.0527, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.8726, -7.2491]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7351158857345581\n.......Validating for epoch 41\ndev_loss: tensor(1.2791, device='cuda:0')\ndev_logits: tensor([[-7.3199, -8.2728]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_loss: tensor(0.3222, device='cuda:0')\ndev_logits: tensor([[-7.7713, -6.8040]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 50.0\n--------------Epoch: 42--------------\nTraining for epoch 42.......\ntrain_loss: tensor(0.7809, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.1110, -7.9426]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.3763, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.4807, -8.2639]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7270342707633972\n.......Validating for epoch 42\ndev_loss: tensor(0.6131, device='cuda:0')\ndev_logits: tensor([[-6.8378, -6.6709]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 
11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_loss: tensor(0.8121, device='cuda:0')\ndev_logits: tensor([[-7.6714, -7.8967]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 53.0\n--------------Epoch: 43--------------\nTraining for epoch 43.......\ntrain_loss: tensor(0.3751, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.7498, -7.5368]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.6315, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.0196, -7.1470]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7657537460327148\n.......Validating for epoch 43\ndev_loss: tensor(0.5991, device='cuda:0')\ndev_logits: tensor([[-7.5249, -7.3270]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 
40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_loss: tensor(1.0419, device='cuda:0')\ndev_logits: tensor([[-7.7947, -8.4014]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 51.0\n--------------Epoch: 44--------------\nTraining for epoch 44.......\ntrain_loss: tensor(2.0222, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.4341, -6.5539]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.1970, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.7690, -8.2934]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.719317615032196\n.......Validating for epoch 44\ndev_loss: tensor(1.1725, device='cuda:0')\ndev_logits: tensor([[-7.3904, -8.1925]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 53.0\ndev_loss: tensor(0.8364, device='cuda:0')\ndev_logits: tensor([[-6.7822, -7.0507]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 53.0\n--------------Epoch: 45--------------\nTraining for epoch 45.......\ntrain_loss: tensor(1.5126, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.3936, -7.1299]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.5357, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.9763, -7.3206]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], 
device='cuda:0')\nav_train_loss: 0.7310871481895447\n.......Validating for epoch 45\ndev_loss: tensor(0.5935, device='cuda:0')\ndev_logits: tensor([[-7.4735, -7.2632]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_loss: tensor(0.4480, device='cuda:0')\ndev_logits: tensor([[-7.6018, -7.0312]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 48.0\n--------------Epoch: 46--------------\nTraining for epoch 46.......\ntrain_loss: tensor(0.4529, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.2063, -7.7634]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.9507, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.9328, -7.4706]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7472051978111267\n.......Validating for epoch 46\ndev_loss: tensor(0.2570, device='cuda:0')\ndev_logits: tensor([[-8.5357, -7.3082]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 
24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_acc[j]: 55.0\ndev_acc[j]: 56.0\ndev_acc[j]: 56.0\ndev_loss: tensor(0.6759, device='cuda:0')\ndev_logits: tensor([[-7.0697, -7.0349]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 57.0\n--------------Epoch: 47--------------\nTraining for epoch 47.......\ntrain_loss: tensor(0.7151, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.6038, -7.5603]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.9657, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.3168, -6.5020]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7424657344818115\n.......Validating for epoch 47\ndev_loss: tensor(0.3832, device='cuda:0')\ndev_logits: tensor([[-7.9212, -7.1597]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_acc[j]: 
55.0\ndev_acc[j]: 55.0\ndev_loss: tensor(1.0998, device='cuda:0')\ndev_logits: tensor([[-7.7899, -8.4848]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 55.0\n--------------Epoch: 48--------------\nTraining for epoch 48.......\ntrain_loss: tensor(0.7954, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.0142, -7.8191]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.5773, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.4752, -7.7222]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7560969591140747\n.......Validating for epoch 48\ndev_loss: tensor(0.5569, device='cuda:0')\ndev_logits: tensor([[-7.5056, -7.2115]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 50.0\ndev_loss: tensor(1.2381, device='cuda:0')\ndev_logits: tensor([[-7.3725, -8.2683]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 50.0\n--------------Epoch: 49--------------\nTraining for epoch 49.......\ntrain_loss: tensor(0.3415, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.7010, -7.5997]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(0.5437, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.4604, -7.7857]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7404201626777649\n.......Validating for epoch 49\ndev_loss: tensor(0.6166, device='cuda:0')\ndev_logits: tensor([[-7.5301, -7.3706]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 
5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_loss: tensor(0.9606, device='cuda:0')\ndev_logits: tensor([[-7.2078, -7.6861]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 49.0\n--------------Epoch: 50--------------\nTraining for epoch 50.......\ntrain_loss: tensor(0.5166, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.2146, -7.6057]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.2192, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.2806, -7.4116]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7624594569206238\n.......Validating for epoch 50\ndev_loss: tensor(0.6064, device='cuda:0')\ndev_logits: tensor([[-7.2783, -7.0967]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 1.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 
29.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_loss: tensor(1.0632, device='cuda:0')\ndev_logits: tensor([[-7.9086, -8.5482]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 50.0\n--------------Epoch: 51--------------\nTraining for epoch 51.......\ntrain_loss: tensor(1.7949, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.0139, -6.4007]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.1869, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-7.8695, -7.0467]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.7553675174713135\n.......Validating for epoch 51\ndev_loss: tensor(0.9036, device='cuda:0')\ndev_logits: tensor([[-7.2420, -7.6263]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_loss: tensor(1.2197, device='cuda:0')\ndev_logits: tensor([[-6.9550, -7.8248]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 40.0\n--------------Epoch: 52--------------\nTraining for epoch 52.......\ntrain_loss: tensor(0.3845, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-6.8961, -7.6535]], 
device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\ntrain_loss: tensor(1.0052, device='cuda:0', grad_fn=<NllLossBackward0>)\ntrain_logits: tensor([[-8.1064, -7.5570]], device='cuda:0', grad_fn=<ViewBackward0>)\nlabel: tensor([0], device='cuda:0')\nav_train_loss: 0.739622950553894\n.......Validating for epoch 52\ndev_loss: tensor(0.7588, device='cuda:0')\ndev_logits: tensor([[-7.4846, -7.6118]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 0.0\ndev_acc[j]: 0.0\ndev_acc[j]: 1.0\ndev_acc[j]: 2.0\ndev_acc[j]: 3.0\ndev_acc[j]: 4.0\ndev_acc[j]: 5.0\ndev_acc[j]: 6.0\ndev_acc[j]: 6.0\ndev_acc[j]: 7.0\ndev_acc[j]: 7.0\ndev_acc[j]: 8.0\ndev_acc[j]: 9.0\ndev_acc[j]: 10.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 11.0\ndev_acc[j]: 12.0\ndev_acc[j]: 13.0\ndev_acc[j]: 14.0\ndev_acc[j]: 15.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 16.0\ndev_acc[j]: 17.0\ndev_acc[j]: 17.0\ndev_acc[j]: 18.0\ndev_acc[j]: 19.0\ndev_acc[j]: 20.0\ndev_acc[j]: 21.0\ndev_acc[j]: 22.0\ndev_acc[j]: 23.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 24.0\ndev_acc[j]: 25.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 26.0\ndev_acc[j]: 27.0\ndev_acc[j]: 28.0\ndev_acc[j]: 29.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 30.0\ndev_acc[j]: 31.0\ndev_acc[j]: 31.0\ndev_acc[j]: 32.0\ndev_acc[j]: 33.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 34.0\ndev_acc[j]: 35.0\ndev_acc[j]: 35.0\ndev_acc[j]: 36.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 37.0\ndev_acc[j]: 38.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 39.0\ndev_acc[j]: 40.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 41.0\ndev_acc[j]: 42.0\ndev_acc[j]: 43.0\ndev_acc[j]: 44.0\ndev_acc[j]: 45.0\ndev_acc[j]: 45.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 46.0\ndev_acc[j]: 47.0\ndev_acc[j]: 48.0\ndev_acc[j]: 49.0\ndev_acc[j]: 49.0\ndev_acc[j]: 50.0\ndev_acc[j]: 51.0\ndev_acc[j]: 52.0\ndev_acc[j]: 53.0\ndev_acc[j]: 54.0\ndev_acc[j]: 55.0\ndev_acc[j]: 56.0\ndev_acc[j]: 57.0\ndev_acc[j]: 58.0\ndev_acc[j]: 59.0\ndev_acc[j]: 59.0\ndev_acc[j]: 60.0\ndev_loss: tensor(1.3966, device='cuda:0')\ndev_logits: tensor([[-7.4250, -8.5373]], device='cuda:0')\nlabel: tensor([1], device='cuda:0')\ndev_acc[j]: 60.0\nTraining completed in 0:24:17.708705\n"
],
[
"# plot\nplt.figure(figsize=(14, 7))\nplt.title(\"Loss VS Epoch\")\n\nplt.plot(train_loss_by_epoch, label=\"train_loss\")\nplt.plot(dev_loss_by_epoch, label=\"dev_loss\")\nplt.xlabel(\"epoch\")\nplt.ylabel(\"loss\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# plot\nplt.figure(figsize=(14, 7))\nplt.title(\"acc VS Epoch\")\n\nplt.plot(dev_acc, label=\"dev_acc\")\nplt.xlabel(\"epoch\")\nplt.ylabel(\"acc\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# save the model\ntorch.save(model, 'RoBERTa.pth')# epochs = 100, lr=1e-2 -> scheular",
"_____no_output_____"
]
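,
[
"# --- added sketch, not part of the original run ---\n# torch.save(model, ...) pickles the whole module, which ties the checkpoint to the\n# exact class definition and environment; saving the state_dict is usually more robust.\n# 'RoBERTa_state.pth' is a hypothetical filename used only for illustration.\ntorch.save(model.state_dict(), 'RoBERTa_state.pth')\n\n# To restore, rebuild the same architecture first, then load the weights.\n# (Assumes `model` was built with RobertaForMultipleChoice, as the paired-choice\n# tokenization and [1, 2] logit shape above suggest.)\n# from transformers import RobertaForMultipleChoice\n# restored = RobertaForMultipleChoice.from_pretrained('roberta-base')\n# restored.load_state_dict(torch.load('RoBERTa_state.pth', map_location=device))\n# restored.to(device).eval()",
"_____no_output_____"
]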
],
[
[
"## Testing",
"_____no_output_____"
]
],
[
[
"test_model = torch.load('RoBERTa.pth')",
"_____no_output_____"
],
[
"test_model.eval()\nnum_correct_pred = 0\n\nwith torch.no_grad():\n for i in range(0, test_raw_data.shape[0]):\n prompt = test_raw_data.iloc[i]['asks-for'] + \". \" + test_raw_data.iloc[i]['p']\n choice0 = test_raw_data.iloc[i]['a1']\n choice1 = test_raw_data.iloc[i]['a2']\n label_test = torch.tensor(test_raw_data.iloc[i]['most-plausible-alternative']).unsqueeze(0).to(device)\n\n encoding_test = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True).to(device)\n # outputs = test_model(input_ids=encoding['input_ids'].unsqueeze(0), attention_mask=encoding['attention_mask'].unsqueeze(0), labels=label)\n outputs_test = test_model(**{k: v.unsqueeze(0) for k,v in encoding_test.items()}, labels=label_test)\n print(\"outputs_test.logits: \", outputs_test.logits)\n # test_logits = outputs_test.logits\n\n #calculate accuracy\n y_pred_test = 1 if outputs_test.logits[0][1] > outputs_test.logits[0][0] else 0\n y_pred_test = torch.tensor(y_pred_test).unsqueeze(0).to(device)\n\n if y_pred_test == label_test:\n print(\"test_logits: \", outputs_test.logits)\n print(\"y_pred: \", y_pred_test)\n print(\"label: \", label_test)\n num_correct_pred += 1\n\nacc = num_correct_pred / test_raw_data.shape[0]\nprint(\"test_accuracy = \", acc)",
"_____no_output_____"
],
[
"# test_model.eval()\nnum_correct_pred = 0\n\nwith torch.no_grad():\n for i in range(0, test_raw_data.shape[0]):\n prompt = test_raw_data.iloc[i]['question'] + \". \" + test_raw_data.iloc[i]['premise']\n choice0 = test_raw_data.iloc[i]['choice1']\n choice1 = test_raw_data.iloc[i]['choice2']\n label = torch.tensor(test_raw_data.iloc[i]['label']).unsqueeze(0).to(device)\n\n encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True).to(device)\n outputs = test_model(input_ids=encoding['input_ids'].unsqueeze(0), attention_mask=encoding['attention_mask'].unsqueeze(0), labels=label)\n\n test_logits = outputs.logits\n\n #calculate accuracy\n y_pred = 1 if outputs.logits[0][1] > outputs.logits[0][0] else 0\n y_pred = torch.tensor(y_pred).unsqueeze(0).to(device)\n\n if y_pred == label:\n print(\"test_logits: \", test_logits)\n print(\"y_pred: \", y_pred)\n print(\"label: \", label)\n num_correct_pred += 1\n\nacc = num_correct_pred / test_raw_data.shape[0]\nprint(\"test_accuracy = \", acc)",
"test_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: 
tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: 
tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_logits: tensor([[1193.0547, 1193.0547]], device='cuda:0')\ny_pred: tensor([0], device='cuda:0')\nlabel: tensor([0], device='cuda:0')\ntest_accuracy = 0.55\n"
],
[
"# Revise\n# Training\nce = nn.CrossEntropyLoss()\nsoftmax = nn.Softmax(dim=0)\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n\nepochs = 100\nper_num_epoch = 1\n\n# train_acc = np.zeros(epochs)\ntrain_loss_by_epoch = np.zeros(epochs)\ndev_acc = np.zeros(epochs)\ndev_loss_by_epoch = np.zeros(epochs)\n\nstart_time = datetime.now()\n\nfor j in range(epochs):\n if j % per_num_epoch == 0:\n print('--------------Epoch: ' + str(j+1) + '--------------')\n \n if j % per_num_epoch == 0:\n print(f'Training for epoch {j + 1}.......')\n av_train_loss = 0\n # print(\"av_train_loss_original: \", av_train_loss)\n model.train()\n for i in range(0, train_raw_data.shape[0]):\n # print(\"av_train_loss_track: \", av_train_loss)\n prompt = train_raw_data.iloc[i]['question'] + \". \" + train_raw_data.iloc[i]['premise']\n choice0 = train_raw_data.iloc[i]['choice1']\n choice1 = train_raw_data.iloc[i]['choice2']\n label = torch.tensor(train_raw_data.iloc[i]['label']).unsqueeze(0).to(device)\n # print(\"label is: \", label)\n # label = torch.tensor(rawdata.iloc[i]['label']).unsqueeze(0).to(device)\n\n encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors='pt', padding=True).to(device)\n # outputs = model(input_ids=encoding['input_ids'].unsqueeze(0), attention_mask=encoding['attention_mask'].unsqueeze(0), labels=label)\n \n outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=label)\n \n train_loss = outputs.loss\n train_logits = outputs.logits\n av_train_loss += train_loss\n\n if i == 0:\n print(\"train_loss: \", train_loss)\n print(\"train_logits: \", train_logits)\n print(\"label: \", label)\n if i == 1:\n print(\"train_loss: \", train_loss)\n print(\"train_logits: \", train_logits)\n print(\"label: \", label)\n\n train_loss.backward()\n\n optimizer.step()\n\n # learning rate decay\n if j == 25:\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\n elif j == 50:\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-7)\n \n optimizer.zero_grad()\n\n train_loss_by_epoch[j] = av_train_loss / train_raw_data.shape[0]\n print(\"av_train_loss: \", train_loss_by_epoch[j])\n\nend_time = datetime.now()\nprint(f'Training completed in {str(end_time - start_time)}')\n",
"_____no_output_____"
]
],
[
[
"## NOTES\n1. Ask about whether the very last output of RoBERTaMultipleChoice is the possibility score for one input embedding.\n\n(pooler): RobertaPooler(\n (dense): Linear(in_features=768, out_features=768, bias=True)\n (activation): Tanh()\n )\n )\n (dropout): Dropout(p=0.1, inplace=False)\n (classifier): Linear(in_features=768, out_features=1, bias=True)\n\n\n2. What's wrong with the model.eval()?\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
d0d4eb717c7415a934e6340a3f44e4a387135f8c | 399,563 | ipynb | Jupyter Notebook | 1 - Python Stock Trading Quick Start with Alpaca.ipynb | Billwaa/python-algorithmic-stock-trading- | 8719752abe0e0dd1d43f07654eacb98dcbfb372f | [
"MIT"
] | null | null | null | 1 - Python Stock Trading Quick Start with Alpaca.ipynb | Billwaa/python-algorithmic-stock-trading- | 8719752abe0e0dd1d43f07654eacb98dcbfb372f | [
"MIT"
] | null | null | null | 1 - Python Stock Trading Quick Start with Alpaca.ipynb | Billwaa/python-algorithmic-stock-trading- | 8719752abe0e0dd1d43f07654eacb98dcbfb372f | [
"MIT"
] | null | null | null | 128.559524 | 122,976 | 0.820546 | [
[
[
"# Python Stock Trading Quick Start with Alpaca\n#### by Billy Hau",
"_____no_output_____"
],
[
"The purpose of this tutorial is to provide a quick start guide to trade stock with python. We will use the Alpaca trading platform since it is free and support paper trading. We will go over the fundamental operations, such as connecting to an account, check account asset, quote stock price and placing an order. We will also program a simple trading bot as an exercise. ",
"_____no_output_____"
],
[
"## Setup Python Data Science Library\n\nWe are going to import the commonly used Data Science libraries here: Pandas, Numpy and Matplotlib. They should come pre-installed with Anaconda, but if not, here's how to install them from pip. ",
"_____no_output_____"
]
],
[
[
"! pip install pandas\n! pip install numpy\n! pip install matplotlib",
"Requirement already satisfied: pandas in c:\\users\\billw\\anaconda3\\lib\\site-packages (1.1.3)\nRequirement already satisfied: pytz>=2017.2 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from pandas) (2020.1)\nRequirement already satisfied: numpy>=1.15.4 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from pandas) (1.19.2)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from pandas) (2.8.1)\nRequirement already satisfied: six>=1.5 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)\nRequirement already satisfied: numpy in c:\\users\\billw\\anaconda3\\lib\\site-packages (1.19.2)\nRequirement already satisfied: matplotlib in c:\\users\\billw\\anaconda3\\lib\\site-packages (3.3.2)\nRequirement already satisfied: pillow>=6.2.0 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (8.0.1)\nRequirement already satisfied: certifi>=2020.06.20 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (2020.6.20)\nRequirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (1.3.0)\nRequirement already satisfied: numpy>=1.15 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (1.19.2)\nRequirement already satisfied: python-dateutil>=2.1 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (2.8.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: six>=1.5 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from python-dateutil>=2.1->matplotlib) (1.15.0)\n"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport time",
"_____no_output_____"
],
[
"! pip install alpaca-trade-api",
"Requirement already satisfied: alpaca-trade-api in c:\\users\\billw\\anaconda3\\lib\\site-packages (1.2.1)\nRequirement already satisfied: urllib3<2,>1.24 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (1.25.11)\nRequirement already satisfied: numpy in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (1.19.2)\nRequirement already satisfied: msgpack==1.0.2 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (1.0.2)\nRequirement already satisfied: requests<3,>2 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (2.24.0)\nRequirement already satisfied: websockets<9,>=8.0 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (8.1)\nRequirement already satisfied: websocket-client<1,>=0.56.0 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (0.59.0)\nRequirement already satisfied: pandas in c:\\users\\billw\\anaconda3\\lib\\site-packages (from alpaca-trade-api) (1.1.3)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from requests<3,>2->alpaca-trade-api) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from requests<3,>2->alpaca-trade-api) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from requests<3,>2->alpaca-trade-api) (3.0.4)\nRequirement already satisfied: six in c:\\users\\billw\\anaconda3\\lib\\site-packages (from websocket-client<1,>=0.56.0->alpaca-trade-api) (1.15.0)\nRequirement already satisfied: pytz>=2017.2 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from pandas->alpaca-trade-api) (2020.1)\nRequirement already satisfied: python-dateutil>=2.7.3 in c:\\users\\billw\\anaconda3\\lib\\site-packages (from pandas->alpaca-trade-api) (2.8.1)\n"
]
],
[
[
"#### 3) Enter API Credentials from https://app.alpaca.markets/paper/dashboard/overview",
"_____no_output_____"
]
],
[
[
"ALPACA_ENDPOINT = 'https://paper-api.alpaca.markets'\nALPACA_API = 'API Key ID' # Replace with API Key ID from Alpaca Paper Account\nALPACA_API_SECRET = 'Secret Key' # Replace with Secret Key from Alpaca Paper Account",
"_____no_output_____"
]
],
[
[
"#### 4) Import Alpaca API and Connect to Account",
"_____no_output_____"
]
],
[
[
"import alpaca_trade_api",
"_____no_output_____"
],
[
"alpaca = alpaca_trade_api.REST(ALPACA_API, ALPACA_API_SECRET, ALPACA_ENDPOINT)",
"_____no_output_____"
]
],
[
[
"## 1) Access Account Information",
"_____no_output_____"
]
],
[
[
"account = alpaca.get_account()\nprint(account)",
"Account({ 'account_blocked': False,\n 'account_number': 'PA2QIYXZJEXP',\n 'buying_power': '111133.208',\n 'cash': '20090.75',\n 'created_at': '2020-12-17T08:28:12.995558Z',\n 'currency': 'USD',\n 'daytrade_count': 0,\n 'daytrading_buying_power': '111133.208',\n 'equity': '31366.31',\n 'id': 'a9214812-7bb8-48fb-8a25-12f2210334db',\n 'initial_margin': '5637.78',\n 'last_equity': '31080.11',\n 'last_maintenance_margin': '3296.808',\n 'long_market_value': '11275.56',\n 'maintenance_margin': '3382.668',\n 'multiplier': '4',\n 'pattern_day_trader': False,\n 'portfolio_value': '31366.31',\n 'regt_buying_power': '51457.06',\n 'short_market_value': '0',\n 'shorting_enabled': False,\n 'sma': '0',\n 'status': 'ACTIVE',\n 'trade_suspended_by_user': False,\n 'trading_blocked': False,\n 'transfers_blocked': False})\n"
]
],
[
[
"##### Account Number",
"_____no_output_____"
]
],
[
[
"account.account_number",
"_____no_output_____"
]
],
[
[
"##### Account Cash",
"_____no_output_____"
]
],
[
[
"account.cash",
"_____no_output_____"
]
],
[
[
"##### Account Portfolio Value",
"_____no_output_____"
]
],
[
[
"account.portfolio_value",
"_____no_output_____"
]
],
[
[
"##### Account Stock Holdings / Positions",
"_____no_output_____"
]
],
[
[
"position = alpaca.list_positions()\nposition",
"_____no_output_____"
]
],
[
[
"## 2) Get Stock Exchange Status",
"_____no_output_____"
]
],
[
[
"clock = alpaca.get_clock()\nclock",
"_____no_output_____"
]
],
[
[
"##### Current Time",
"_____no_output_____"
]
],
[
[
"clock.timestamp",
"_____no_output_____"
]
],
[
[
"##### Is the Market Currently Open?",
"_____no_output_____"
]
],
[
[
"clock.is_open",
"_____no_output_____"
]
],
[
[
"## 3) Get Stock Information",
"_____no_output_____"
]
],
[
[
"trade = alpaca.get_latest_trade('NIO')\ntrade",
"_____no_output_____"
]
],
[
[
"##### Last Trade Price",
"_____no_output_____"
]
],
[
[
"trade.p",
"_____no_output_____"
]
],
[
[
"##### Last Trade Size",
"_____no_output_____"
]
],
[
[
"trade.s",
"_____no_output_____"
]
],
[
[
"##### Last Trade Time",
"_____no_output_____"
]
],
[
[
"trade.t",
"_____no_output_____"
]
],
[
[
"### Historic Data\nHistoric Data is stored in Barsets and can be reported in timeframe of <b>1Min / 5Min / 15Min / 1D</b>.\n- c: close\n- h: high\n- l: low\n- o: open\n- t: timestamp\n- v: volume",
"_____no_output_____"
]
],
[
[
"historic_data = alpaca.get_barset('NIO', timeframe='1D', limit=20)\nhistoric_data",
"_____no_output_____"
],
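  [
   "# A quick check of the bar fields listed above (a minimal sketch, assuming the\n# alpaca-trade-api v1 Bar entity exposes attributes o/h/l/c/v and timestamp t;\n# `historic_data` comes from the previous cell).\nlast_bar = historic_data['NIO'][-1]\nprint(last_bar.t, last_bar.o, last_bar.h, last_bar.l, last_bar.c, last_bar.v)",
   "_____no_output_____"
  ],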
[
"NIO_DATA = historic_data.df\nNIO_DATA",
"_____no_output_____"
],
[
"NIO_DATA['NIO','close'].plot(figsize=(16,5), title='NIO Closing Price')",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(16,5))\n\nl1 = ax.plot(NIO_DATA['NIO', 'close'], 'g-', label='Close')\nax.set_xlabel('Date')\nax.set_ylabel('Closing Price $ USD')\nax.legend(loc = 'upper left')\n\nl2 = ax2 = ax.twinx()\nax2.plot(NIO_DATA['NIO', 'volume'], 'y-', label='Volume')\nax2.set_ylabel('Trading Volume')\nax2.legend(loc = 'upper right')\n\nplt.title('NIO')\n\n",
"_____no_output_____"
]
],
[
[
"### Historic Data for Multiple Stock",
"_____no_output_____"
]
],
[
[
"portfolio_list = ['SPY', 'VNQ', 'BND', 'GLD']",
"_____no_output_____"
],
[
"portfolio_data = alpaca.get_barset(portfolio_list, '1D', limit=100).df\nportfolio_data",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(16,5))\n\nfor i in range(len(portfolio_list)):\n percentage_return = (portfolio_data[portfolio_list[i],'close'] - portfolio_data[portfolio_list[i],'close'][0]) / portfolio_data[portfolio_list[i],'close'][0]\n plt.plot(percentage_return, label=portfolio_list[i])\n\nplt.legend()\nplt.title('Portfolio List Return')",
"_____no_output_____"
]
],
[
[
"## 4) Placing Stock Order",
"_____no_output_____"
],
[
"##### Placing a Buy Order at Market Price",
"_____no_output_____"
]
],
[
[
"order0 = alpaca.submit_order('FB', qty=25, side='buy', type='market')\norder0",
"_____no_output_____"
]
],
[
[
"##### Placing a Buy Order at Limit Price",
"_____no_output_____"
]
],
[
[
"order1 = alpaca.submit_order('NIO', qty=10, side='buy', type='limit', limit_price=28)\norder1",
"_____no_output_____"
]
],
[
[
"##### Placing a Sell Order at Market Price",
"_____no_output_____"
]
],
[
[
"order2 = alpaca.submit_order('FB', qty=10, side='sell', type='market')\norder2",
"_____no_output_____"
]
],
[
[
"##### Shorting a Stock\n\nConfigure Account to Allow / Disallow Shorting Stock\nhttps://app.alpaca.markets/paper/dashboard/config",
"_____no_output_____"
]
],
[
[
"alpaca.submit_order('GE', qty=100, side='sell', type='market')",
"_____no_output_____"
]
],
[
[
"##### List All Current Orders",
"_____no_output_____"
]
],
[
[
"order = alpaca.list_orders()\norder",
"_____no_output_____"
]
],
[
[
"##### Cancel an Order",
"_____no_output_____"
]
],
[
[
"alpaca.cancel_order(order[0].id)",
"_____no_output_____"
],
[
"alpaca.list_orders()",
"_____no_output_____"
]
],
[
[
"##### Cancel All Orders",
"_____no_output_____"
]
],
[
[
"alpaca.cancel_all_orders()",
"_____no_output_____"
],
[
"alpaca.list_orders()",
"_____no_output_____"
]
],
[
[
"##### Check Specific Order Status",
"_____no_output_____"
]
],
[
[
"alpaca.get_order(order0.id)",
"_____no_output_____"
],
[
"print(order0.symbol + '\\t' + order0.side + '\\t' + order0.qty + '\\t' + order0.type + '\\t' + alpaca.get_order(order0.id).status)",
"FB\tbuy\t25\tmarket\tfilled\n"
],
[
"print(order1.symbol + '\\t' + order1.side + '\\t' + order1.qty + '\\t' + order1.type + '\\t' +alpaca.get_order(order1.id).status)",
"NIO\tbuy\t10\tlimit\tnew\n"
],
[
"print(order2.symbol + '\\t' + order2.side + '\\t' + order2.qty + '\\t' + order1.type + '\\t' +alpaca.get_order(order2.id).status)",
"FB\tsell\t10\tlimit\tfilled\n"
]
],
[
[
"## [PROJECT] Simple Paper Stock Trading Bot\n\nThis is a simple trading bot project to practice everything we learned here. THIS IS ONLY MEANT FOR A PRACTICE, DO NOT USE FOR LIVE TRADING! AS ALWAYS, USE AT YOUR OWN RISK! In future tutorials, we will build a back-testing program to test our strategies. But right now, we just want an automatic high frequency trading bot. The strategy is simple, make a trade decision when the short term moving average crosses over the long term moving average. Buy when there is positive momentum and sell when there is negative momentum.\n\nMAKE SURE TO ENABLE SHORT SELLING FOR THIS EXERCISE!",
"_____no_output_____"
]
],
[
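 [
  "# A minimal sketch of the crossover rule described above (assumption: `prices`\n# is any pandas Series of closing prices; the 15/30 windows match the bot below).\nimport pandas as pd\n\ndef crossover_signal(prices, fast=15, slow=30):\n    slow_ma = prices.rolling(slow).mean()\n    # percent gap between the fast and slow moving averages\n    gap = (prices.rolling(fast).mean() - slow_ma) / slow_ma * 100\n    prev, curr = gap.iloc[-2], gap.iloc[-1]\n    if prev * curr < 0:  # sign change => the averages just crossed\n        return 'buy' if curr > 0 else 'sell'\n    return 'hold'\n\n# example with the GME minute bars pulled in the next cell:\n# crossover_signal(data['GME', 'close'])",
  "_____no_output_____"
 ],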
[
"stock = 'GME'\ndata = alpaca.get_barset(stock, timeframe='1Min', limit=100).df\navg = data[stock,'close'].rolling(30).mean()\navg2 = data[stock,'close'].rolling(15).mean()\ndiff2 = avg2.diff()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(16,10))\n\nax.plot(avg, 'b-', label = 'Moving Average 30Min')\nax.plot(avg2, 'm-', label = 'Moving Average 15Min')\nax.plot(data[stock,'close'], label='Price', alpha=0.3)\nax.legend(loc = 'upper left')\n\nax2 = ax.twinx()\nax2.plot(diff2, 'y--', label = 'Moving Average 15Min Diff', alpha = 0.8)\nax2.legend(loc='upper right')\n\nformatter = mdates.DateFormatter('%m/%d %T %Z', tz=data.index.tz)\nplt.gca().xaxis.set_major_formatter(formatter)",
"_____no_output_____"
],
[
"# Percentage Change with initial reset when crossed moving average?\n# Momentum... buy when cross MA line moving upward, sell when cross MA line moving downward",
"_____no_output_____"
],
[
"stock = 'GME'\ntrade_share = 100\n#trigger_threshold = 0.001\n\nMA_Diff0 = 0\n\nwhile(True):\n\n print('\\n\\n')\n \n # Get Market Clock\n clock = alpaca.get_clock()\n timestr = clock.timestamp.strftime('[%m/%d/%y %H:%M:%S]')\n\n # Check if Market is Open\n if clock.is_open:\n print(timestr + ' Market is Open... Executing Trading Bot Sequence!')\n\n # Get Stock Data\n print(' - [' + stock + '] Retrieving Market Data ...' )\n data = alpaca.get_barset(stock, timeframe='1Min', limit=120).df\n\n # Analysis Stock Price\n print(' - [' + stock + '] Performing Stock Price Analysis ...' )\n avg30 = data[stock,'close'].rolling(30).mean()\n avg15 = data[stock,'close'].rolling(15).mean()\n diff15 = avg15.diff() \n MA_Diff = (avg15[-1] - avg30[-1]) / avg30[-1]*100\n price = alpaca.get_last_trade(stock).price \n print(' --- [{}] Price: {:,.2f} MA15: {:,.2f} DIFF15: {:,.2f} MA30: {:,.2f} MA_Diff: {:,.2f}'.format(stock, price, avg15[-1], diff15[-1], avg30[-1], MA_Diff) ) \n\n # Technical Analysis - Buy or Sell Decision\n # Strategy: Trigger when MA15 cross over to MA30... positive momentum - buy ... negative momentum - sell\n print(' - [' + stock + '] Performing Technical Analysis ...' )\n \n if MA_Diff * MA_Diff0 < 0:\n print('--- Trigger Decision Analysis...')\n\n # Positive Momentum\n if MA_Diff > 0:\n target_share = trade_share\n print('---- LONG: targeted {} shares'.format( target_share ))\n\n # Negative Momentum\n if MA_Diff < 0:\n target_share = -trade_share\n print('---- SHORT: targeted {} shares'.format(target_share) )\n\n\n # Retrieve Account Holdings\n position = alpaca.list_positions()\n\n stock_holding = 0\n\n for i in range(len(position)):\n if position[i].symbol == stock:\n stock_holding = float(position[i].qty)\n\n print(' ----- [{}] current position: {}'.format(stock, stock_holding))\n\n\n\n\n # Submit Trade Order if Needed \n\n if stock_holding != 0 and stock_holding != target_share:\n order = alpaca.close_position(stock)\n print(' ----- [{}] closing current position ... '.format(stock))\n\n for i in range(100):\n status = alpaca.get_order(order.id).status\n if status == 'filled':\n print(' ----- [{}] position closed!'.format(stock))\n break \n\n\n\n if target_share != stock_holding and target_share > 0:\n\n order = alpaca.submit_order(stock, qty=abs(target_share), side='buy', type='market')\n print(' ----- [{}] BUYING {} shares... '.format(stock, abs(target_share)))\n\n for i in range(100):\n status = alpaca.get_order(order.id).status\n if status == 'filled':\n print(' ----- [{}] order excuted!'.format(stock))\n break \n\n\n if target_share != stock_holding and target_share < 0:\n\n order = alpaca.submit_order(stock, qty=abs(target_share), side='sell', type='market')\n print(' ----- [{}] SHORT SELLING {} shares... '.format(stock, abs(target_share)))\n\n for i in range(100):\n status = alpaca.get_order(order.id).status\n if status == 'filled':\n print(' ----- [{}] order excuted!'.format(stock))\n break \n\n\n\n else:\n print('--- Pass Through')\n \n \n \n # Retrieve Account Holdings\n position = alpaca.list_positions()\n\n stock_holding = 0\n\n for i in range(len(position)):\n if position[i].symbol == stock:\n stock_holding = float(position[i].qty)\n\n print(' - [{}] current position: {}'.format(stock, stock_holding))\n\n\n\n MA_Diff0 = MA_Diff\n \n \n # Close Stock Position Near Ends of Market Hour and EXIT BOT\n \n if clock.timestamp.hour >= 15 and clock.timestamp.minute >= 58:\n print(' - [{}] MARKET CLOSING! 
CLOSING STOCK POSITION...'.format(stock, stock_holding))\n\n if stock_holding > 0:\n order = alpaca.close_position(stock)\n\n for i in range(100):\n status = alpaca.get_order(order.id).status\n if status == 'filled':\n break\n \n print(' [{}] Position Closed! Terminating Trade Bot!'.format(stock))\n\n\n break\n \n else:\n printt(timestr + 'Market is currently closed... please come back later!')\n\n \n # Loop Control\n time.sleep(30)",
"\n\n\n[05/17/21 15:11:01] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.29 MA15: 177.31 DIFF15: -0.02 MA30: 175.76 MA_Diff: 0.89\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:11:31] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.52 MA15: 177.31 DIFF15: -0.02 MA30: 175.76 MA_Diff: 0.89\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:12:01] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.70 MA15: 177.25 DIFF15: -0.06 MA30: 175.87 MA_Diff: 0.78\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:12:31] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.27 MA15: 177.25 DIFF15: -0.06 MA30: 175.87 MA_Diff: 0.78\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:13:02] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.14 MA15: 177.13 DIFF15: -0.12 MA30: 175.94 MA_Diff: 0.68\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:13:32] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.14 MA15: 177.13 DIFF15: -0.12 MA30: 175.94 MA_Diff: 0.68\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:14:02] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.63 MA15: 177.05 DIFF15: -0.09 MA30: 176.04 MA_Diff: 0.57\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:14:32] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.40 MA15: 177.05 DIFF15: -0.09 MA30: 176.04 MA_Diff: 0.57\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:15:02] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.40 MA15: 176.96 DIFF15: -0.09 MA30: 176.11 MA_Diff: 0.48\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:15:32] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.40 MA15: 176.96 DIFF15: -0.09 MA30: 176.11 MA_Diff: 0.48\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:16:02] Market is Open... 
Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.48 MA15: 176.83 DIFF15: -0.13 MA30: 176.19 MA_Diff: 0.36\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:16:32] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.69 MA15: 176.83 DIFF15: -0.13 MA30: 176.19 MA_Diff: 0.36\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:17:03] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.34 MA15: 176.72 DIFF15: -0.12 MA30: 176.28 MA_Diff: 0.25\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:17:33] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.48 MA15: 176.72 DIFF15: -0.12 MA30: 176.28 MA_Diff: 0.25\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:18:03] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.63 MA15: 176.62 DIFF15: -0.10 MA30: 176.36 MA_Diff: 0.15\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:18:33] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.69 MA15: 176.62 DIFF15: -0.10 MA30: 176.36 MA_Diff: 0.15\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:19:03] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.69 MA15: 176.50 DIFF15: -0.13 MA30: 176.44 MA_Diff: 0.03\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:19:33] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 177.04 MA15: 176.50 DIFF15: -0.13 MA30: 176.44 MA_Diff: 0.03\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: 0\n\n\n\n[05/17/21 15:20:03] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 177.40 MA15: 176.42 DIFF15: -0.08 MA30: 176.51 MA_Diff: -0.05\n - [GME] Performing Technical Analysis ...\n--- Trigger Decision Analysis...\n---- SHORT: targeted -100 shares\n ----- [GME] current position: 0\n ----- [GME] SHORT SELLING 100 shares... \n ----- [GME] order excuted!\n\n\n\n[05/17/21 15:20:34] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 177.19 MA15: 176.42 DIFF15: -0.08 MA30: 176.51 MA_Diff: -0.05\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: -100.0\n\n\n\n[05/17/21 15:21:04] Market is Open... 
Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 177.20 MA15: 176.35 DIFF15: -0.07 MA30: 176.60 MA_Diff: -0.14\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: -100.0\n\n\n\n[05/17/21 15:21:34] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.72 MA15: 176.35 DIFF15: -0.07 MA30: 176.60 MA_Diff: -0.14\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: -100.0\n\n\n\n[05/17/21 15:22:04] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 176.53 MA15: 176.33 DIFF15: -0.02 MA30: 176.66 MA_Diff: -0.19\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: -100.0\n\n\n\n[05/17/21 15:22:35] Market is Open... Executing Trading Bot Sequence!\n - [GME] Retrieving Market Data ...\n - [GME] Performing Stock Price Analysis ...\n --- [GME] Price: 175.95 MA15: 176.33 DIFF15: -0.02 MA30: 176.66 MA_Diff: -0.19\n - [GME] Performing Technical Analysis ...\n--- Pass Through\n - [GME] current position: -100.0\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d0d4fd983867ec7ed1cb25d0e2917e4b9b3042f3 | 78,939 | ipynb | Jupyter Notebook | homework_3_custom_framework/homework_main.ipynb | Vinogradov-Mikhail/Spbu_ML_hw_19 | 564ed068b07656f4807ca955e6335b7e7dd5b340 | [
"MIT"
] | 1 | 2021-06-26T11:49:10.000Z | 2021-06-26T11:49:10.000Z | homework_3_custom_framework/homework_main.ipynb | Vinogradov-Mikhail/Spbu_ML_hw_19 | 564ed068b07656f4807ca955e6335b7e7dd5b340 | [
"MIT"
] | null | null | null | homework_3_custom_framework/homework_main.ipynb | Vinogradov-Mikhail/Spbu_ML_hw_19 | 564ed068b07656f4807ca955e6335b7e7dd5b340 | [
"MIT"
] | 1 | 2021-04-13T13:16:22.000Z | 2021-04-13T13:16:22.000Z | 142.232432 | 22,416 | 0.879211 | [
[
[
"# Homework: Basic Artificial Neural Networks",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom time import time, sleep\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import display",
"_____no_output_____"
]
],
[
[
"# Framework",
"_____no_output_____"
],
[
"Implement everything in `Modules.ipynb`. Read all the comments thoughtfully to ease the pain. Please try not to change the prototypes.\n\nDo not forget, that each module should return **AND** store `output` and `gradInput`.\n\nThe typical assumption is that `module.backward` is always executed after `module.forward`,\nso `output` is stored, this would be useful for `SoftMax`. ",
"_____no_output_____"
]
],
[
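 [
  "# A minimal sketch of the Module contract described above (an assumption about\n# the interface in homework_modules.ipynb, shown for orientation only):\n# forward() computes and *stores* `output`; backward() runs afterwards and\n# computes and *stores* `gradInput`.\nclass ModuleSketch:\n    def __init__(self):\n        self.output = None\n        self.gradInput = None\n\n    def forward(self, input):\n        self.output = input  # identity module: output == input\n        return self.output\n\n    def backward(self, input, gradOutput):\n        self.gradInput = gradOutput  # identity: the gradient passes through\n        return self.gradInput",
  "_____no_output_____"
 ],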
[
"# (re-)load layers\n%run homework_modules.ipynb",
"_____no_output_____"
]
],
[
[
"Optimizer is implemented for you. ",
"_____no_output_____"
]
],
[
[
"def sgd_momentum(x, dx, config, state):\n \"\"\"\n This is a very ugly implementation of sgd with momentum \n just to show an example how to store old grad in state.\n \n config:\n - momentum\n - learning_rate\n state:\n - old_grad\n \"\"\"\n \n # x and dx have complex structure, old dx will be stored in a simpler one\n state.setdefault('old_grad', {})\n \n i = 0 \n for cur_layer_x, cur_layer_dx in zip(x,dx): \n for cur_x, cur_dx in zip(cur_layer_x,cur_layer_dx):\n \n cur_old_grad = state['old_grad'].setdefault(i, np.zeros_like(cur_dx))\n \n np.add(config['momentum'] * cur_old_grad, config['learning_rate'] * cur_dx, out = cur_old_grad)\n\n cur_x -= cur_old_grad\n i += 1 ",
"_____no_output_____"
],
[
"def normalization(column):\n column_min = column.min()\n column_max = column.max()\n \n column_range = column_max - column_min\n if(column_range == 0):\n return (column - column_min)\n\n return (column - column_min) / column_range",
"_____no_output_____"
],
[
"def create_onehot(column):\n class_count = column.max() + 1\n size = column.shape[0]\n onehot = np.zeros((size, class_count), dtype=float)\n \n for i in range(size):\n onehot[i][column[i]] = 1.0\n \n return onehot",
"_____no_output_____"
],
[
"# Open MNIST dataset and prepare for train\nfrom mlxtend.data import loadlocal_mnist\n\nx_train, y_train = loadlocal_mnist(images_path='Dataset/train-images-idx3-ubyte', labels_path='Dataset/train-labels-idx1-ubyte')\n\nx_test, y_test = loadlocal_mnist(images_path='Dataset/t10k-images-idx3-ubyte', labels_path='Dataset/t10k-labels-idx1-ubyte')\n\n# normalize\nx_train = normalization(x_train)\nx_test = normalization(x_test)\n\n# create onehot for y\ny_train_onehot = create_onehot(y_train)\ny_test_onehot = create_onehot(y_test)",
"_____no_output_____"
],
[
"# batch generator\ndef get_batches(dataset, batch_size):\n X, Y = dataset\n n_samples = X.shape[0]\n \n # Shuffle at the start of epoch\n indices = np.arange(n_samples)\n np.random.shuffle(indices)\n \n for start in range(0, n_samples, batch_size):\n end = min(start + batch_size, n_samples)\n \n batch_idx = indices[start:end]\n \n yield X[batch_idx], Y[batch_idx]",
"_____no_output_____"
],
[
"features = x_train.shape[1]\n\n# Iptimizer params\noptimizer_config = {'learning_rate' : 1e-1, 'momentum': 0.9}\noptimizer_state = {}\n\n# Looping params\nn_epoch = 6\nbatch_size = 180",
"_____no_output_____"
]
],
[
[
"### Build NN",
"_____no_output_____"
]
],
[
[
"net = Sequential()\nnet.add(Linear(features, 300))\nnet.add(ReLU())\nnet.add(Linear(300, 10))\nnet.add(SoftMax())\n\ncriterion = MSECriterion()\n\nprint(net)",
"Linear 784 -> 300\nReLU\nLinear 300 -> 10\nSoftMax\n\n"
]
],
[
[
"### Train",
"_____no_output_____"
],
[
"Basic training loop. Examine it.",
"_____no_output_____"
]
],
[
[
"loss_history = []\n\nfor i in range(n_epoch):\n for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):\n \n net.zeroGradParameters()\n \n # Forward\n predictions = net.forward(x_batch)\n loss = criterion.forward(predictions, y_batch)\n \n # Backward\n dp = criterion.backward(predictions, y_batch)\n net.backward(x_batch, dp)\n \n # Update weights\n sgd_momentum(net.getParameters(), \n net.getGradParameters(), \n optimizer_config,\n optimizer_state) \n \n loss_history.append(loss)\n \n # Visualize\n display.clear_output(wait=True)\n plt.figure(figsize=(8, 6))\n \n plt.title(\"Training loss\")\n plt.xlabel(\"#iteration\")\n plt.ylabel(\"loss\")\n plt.plot(loss_history, 'b')\n plt.show()\n \n print('Current loss: %f' % loss) ",
"_____no_output_____"
]
],
[
[
"### Build NN with dropout",
"_____no_output_____"
]
],
[
[
"net = Sequential()\nnet.add(Linear(features, 300))\nnet.add(ReLU())\nnet.add(Dropout(0.7))\nnet.add(Linear(300, 10))\nnet.add(SoftMax())\n\ncriterion = MSECriterion()\n\nprint(net)",
"Linear 784 -> 300\nReLU\nDropout\nLinear 300 -> 10\nSoftMax\n\n"
],
[
"loss_history = []\n\nfor i in range(n_epoch):\n for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):\n \n net.zeroGradParameters()\n \n # Forward\n predictions = net.forward(x_batch)\n loss = criterion.forward(predictions, y_batch)\n \n # Backward\n dp = criterion.backward(predictions, y_batch)\n net.backward(x_batch, dp)\n \n # Update weights\n sgd_momentum(net.getParameters(), \n net.getGradParameters(), \n optimizer_config,\n optimizer_state) \n \n loss_history.append(loss)\n\n # Visualize\n display.clear_output(wait=True)\n plt.figure(figsize=(8, 6))\n \n plt.title(\"Training loss\")\n plt.xlabel(\"#iteration\")\n plt.ylabel(\"loss\")\n plt.plot(loss_history, 'b')\n plt.show()\n \n print('Current loss: %f' % loss) ",
"_____no_output_____"
],
[
"# Your answer goes here. ################################################",
"_____no_output_____"
],
[
"net = Sequential()\nnet.add(Linear(features, 600))\nnet.add(ReLU())\nnet.add(Dropout(0.7))\nnet.add(Linear(600, 300))\nnet.add(ReLU())\nnet.add(Linear(300, 100))\nnet.add(ReLU())\nnet.add(Linear(100, 10))\nnet.add(SoftMax())\n\ncriterion = MSECriterion()\n\nprint(net)",
"Linear 784 -> 600\nReLU\nDropout\nLinear 600 -> 300\nReLU\nLinear 300 -> 100\nReLU\nLinear 100 -> 10\nSoftMax\n\n"
],
[
"loss_history = []\n\nfor i in range(n_epoch):\n for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):\n \n net.zeroGradParameters()\n \n # Forward\n predictions = net.forward(x_batch)\n loss = criterion.forward(predictions, y_batch)\n \n # Backward\n dp = criterion.backward(predictions, y_batch)\n net.backward(x_batch, dp)\n \n # Update weights\n sgd_momentum(net.getParameters(), \n net.getGradParameters(), \n optimizer_config,\n optimizer_state) \n \n loss_history.append(loss)\n\n # Visualize\n display.clear_output(wait=True)\n plt.figure(figsize=(8, 6))\n \n plt.title(\"Training loss\")\n plt.xlabel(\"#iteration\")\n plt.ylabel(\"loss\")\n plt.plot(loss_history, 'b')\n plt.show()\n \n print('Current loss: %f' % loss) ",
"_____no_output_____"
],
[
"# Your code goes here. ################################################",
"_____no_output_____"
],
[
"# np.clip(prediction,0,1)\n#\n# Your code goes here. ################################################",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d510300cec1d536944a12a22dc7a8820cb48c1 | 2,822 | ipynb | Jupyter Notebook | model_test.ipynb | arymandeshwal/Predict-Bank-Credit-Risk-using-South-German-Credit-Data | 8766c7756fc0884b50eb6609d862cea94bc42b09 | [
"MIT"
] | 1 | 2021-08-10T21:33:20.000Z | 2021-08-10T21:33:20.000Z | model_test.ipynb | arymandeshwal/Predict-Bank-Credit-Risk-using-South-German-Credit-Data | 8766c7756fc0884b50eb6609d862cea94bc42b09 | [
"MIT"
] | null | null | null | model_test.ipynb | arymandeshwal/Predict-Bank-Credit-Risk-using-South-German-Credit-Data | 8766c7756fc0884b50eb6609d862cea94bc42b09 | [
"MIT"
] | null | null | null | 19.328767 | 108 | 0.51949 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"South_German_Credit_formatted.csv\")\ndf.shape",
"_____no_output_____"
],
[
"X = df.drop(\"credit_risk\",axis=1)\ny = df[\"credit_risk\"]",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, shuffle=True)",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsRegressor\nknn_model = KNeighborsRegressor(n_neighbors=3)",
"_____no_output_____"
],
[
"knn_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_squared_error\nfrom math import sqrt\ntest_preds = knn_model.predict(X_test)\nmse = mean_squared_error(y_test, test_preds)\nrmse = sqrt(mse)\nrmse",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d5175284dfe0f8b53c47ad4764c3f8d21f88ec | 95,395 | ipynb | Jupyter Notebook | sample_epidemic.ipynb | jrtabash/math_models | cd706bc6194ec921068fa58ef729dcdf18230a5a | [
"MIT"
] | null | null | null | sample_epidemic.ipynb | jrtabash/math_models | cd706bc6194ec921068fa58ef729dcdf18230a5a | [
"MIT"
] | null | null | null | sample_epidemic.ipynb | jrtabash/math_models | cd706bc6194ec921068fa58ef729dcdf18230a5a | [
"MIT"
] | null | null | null | 728.206107 | 33,088 | 0.954306 | [
[
[
"import math_models_epidemic as epidemic\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# SIR Model\n# Transmit Rate: 3.5\n# Remove Rate: 0.5\n\nsirModel = epidemic.SIRModel(transmitRate=3.5, removeRate=0.5)\nt, sir = epidemic.solve(sirModel, 15, 150)\nepidemic.plot(sirModel, t, sir)",
"_____no_output_____"
],
[
"# SEIR Model\n# Transmit Rate: 3.5\n# Infect Rate: 1.0\n# Remove Rate: 0.5\n\nseirModel = epidemic.SEIRModel(transmitRate=3.5, infectRate=1.0, removeRate=0.5)\nt, seir = epidemic.solve(seirModel, 20, 200)\nepidemic.plot(seirModel, t, seir)",
"_____no_output_____"
],
[
"# SEIR Model with Infectivity During Exposed Period\n# Transmit Rate: 3.5\n# Reduce Exposed Infectivity Rate: 0.25\n# Infect Rate: 1.0\n# Remove Rate: 0.5\n\nseir2Model = epidemic.SEIRModel(transmitRate=3.5, reducedEIRate=0.25, infectRate=1.0, removeRate=0.5)\nt, seir = epidemic.solve(seir2Model, 20, 200)\nepidemic.plot(seir2Model, t, seir)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
d0d51e84892436614222a0fff3f72ad3661b9a81 | 273,014 | ipynb | Jupyter Notebook | pymc3/examples/gaussian_mixture_model.ipynb | dhiapet/PyMC3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | 1 | 2020-09-29T12:32:32.000Z | 2020-09-29T12:32:32.000Z | pymc3/examples/gaussian_mixture_model.ipynb | dhiapet/PyMC3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | pymc3/examples/gaussian_mixture_model.ipynb | dhiapet/PyMC3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | 784.522989 | 136,506 | 0.93934 | [
[
[
"!date\nimport numpy as np, pandas as pd, matplotlib.pyplot as plt, seaborn as sns\n%matplotlib inline\nsns.set_context('paper')\nsns.set_style('darkgrid')",
"Mon Aug 17 14:54:44 CEST 2015\r\n"
]
],
[
[
"# Mixture Model in PyMC3\n\nOriginal NB by Abe Flaxman, modified by Thomas Wiecki\n",
"_____no_output_____"
]
],
[
[
"import pymc3 as pm, theano.tensor as tt",
"/home/wiecki/miniconda3/lib/python3.4/site-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\n/home/wiecki/miniconda3/lib/python3.4/site-packages/IPython/utils/traitlets.py:5: UserWarning: IPython.utils.traitlets has moved to a top-level traitlets package.\n warn(\"IPython.utils.traitlets has moved to a top-level traitlets package.\")\n"
],
[
"# simulate data from a known mixture distribution\nnp.random.seed(12345) # set random seed for reproducibility\n\nk = 3\nndata = 500\nspread = 5\ncenters = np.array([-spread, 0, spread])\n\n# simulate data from mixture distribution\nv = np.random.randint(0, k, ndata)\ndata = centers[v] + np.random.randn(ndata)\n\nplt.hist(data);",
"_____no_output_____"
],
[
"# setup model\nmodel = pm.Model()\nwith model:\n # cluster sizes\n a = pm.constant(np.array([1., 1., 1.]))\n p = pm.Dirichlet('p', a=a, shape=k)\n # ensure all clusters have some points\n p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))\n\n\n # cluster centers\n means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)\n # break symmetry\n order_means_potential = pm.Potential('order_means_potential',\n tt.switch(means[1]-means[0] < 0, -np.inf, 0)\n + tt.switch(means[2]-means[1] < 0, -np.inf, 0))\n \n # measurement error\n sd = pm.Uniform('sd', lower=0, upper=20)\n\n # latent cluster of each observation\n category = pm.Categorical('category',\n p=p,\n shape=ndata)\n\n # likelihood for each observed value\n points = pm.Normal('obs',\n mu=means[category],\n sd=sd,\n observed=data)",
"_____no_output_____"
],
[
"# fit model\nwith model:\n step1 = pm.Metropolis(vars=[p, sd, means])\n step2 = pm.ElemwiseCategoricalStep(var=category, values=[0, 1, 2])\n tr = pm.sample(10000, step=[step1, step2])",
" [-----------------100%-----------------] 10000 of 10000 complete in 93.9 sec"
]
],
[
[
"## Full trace",
"_____no_output_____"
]
],
[
[
"pm.plots.traceplot(tr, ['p', 'sd', 'means']);",
"_____no_output_____"
]
],
[
[
"## After convergence",
"_____no_output_____"
]
],
[
[
"# take a look at traceplot for some model parameters\n# (with some burn-in and thinning)\npm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means']);",
"_____no_output_____"
],
[
"# I prefer autocorrelation plots for serious confirmation of MCMC convergence\npm.autocorrplot(tr[5000::5], ['sd'])",
"_____no_output_____"
]
],
[
[
"## Sampling of cluster for individual data point",
"_____no_output_____"
]
],
[
[
"i=0\nplt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')\nplt.axis(ymin=-.1, ymax=2.1)",
"_____no_output_____"
],
[
"def cluster_posterior(i=0):\n print('true cluster:', v[i])\n print(' data value:', np.round(data[i],2))\n plt.hist(tr['category'][5000::5,i], bins=[-.5,.5,1.5,2.5,], rwidth=.9)\n plt.axis(xmin=-.5, xmax=2.5)\n plt.xticks([0,1,2])\ncluster_posterior(i)",
"true cluster: 2\n data value: 3.29\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0d52610aaa8aa29657e2eb93e47a51fb03d9f43 | 16,147 | ipynb | Jupyter Notebook | sorting.ipynb | rakeshnavsoft/django-docker-s3 | ed38d0f8d442f7d977c1ddec297a49b123e31d6b | [
"MIT"
] | null | null | null | sorting.ipynb | rakeshnavsoft/django-docker-s3 | ed38d0f8d442f7d977c1ddec297a49b123e31d6b | [
"MIT"
] | null | null | null | sorting.ipynb | rakeshnavsoft/django-docker-s3 | ed38d0f8d442f7d977c1ddec297a49b123e31d6b | [
"MIT"
] | null | null | null | 24.765337 | 143 | 0.430668 | [
[
[
"def bubbleSort(arr):\n n=len(arr)\n for i in range(n):\n for j in range(0, n-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n \narr = [3,2,6,4,5,8]\nbubbleSort(arr)\n \nprint (\"Sorted array is:\")\nfor i in range(len(arr)):\n print (\"%d\" %arr[i]), \n ",
"Sorted array is:\n2\n3\n4\n5\n6\n8\n"
],
[
"def countPaire(arr, sum):\n count=0\n for i in range(0,len(arr)):\n for j in range(i+1,len(arr)):\n if (arr[i] + arr[j]) <= sum:\n print(arr[i], arr[j])\n count +=1\n return count\n\narr=[4, 5, 7, 3]\nsum=10\nprint(countPaire(arr,sum))",
"4 5\n4 3\n5 3\n7 3\n4\n"
],
[
"def countPair1(arr, sum):\n i=0\n j=len(arr)-1\n \n while(j>i):\n if(arr[i]+ arr[j] == sum):\n return True\n elif(arr[i]+ arr[j] > sum):\n j -=1\n else:\n i +=1\n return False\n \n \narr=[1, 5, 7, -1]\nsum=6\nprint(countPair1(arr,sum))",
"True\n"
],
[
"#given an array of numbers, find a pair of number that add up to 10\ndef IsPairOf10 (given_array):\n seen_numbers = {}\n for item in given_array:\n if (10 - item) in seen_numbers:\n print('The following pair of number in array adds up to 10: ' + str(item) + ' and ' + str(10 - item))\n return\n else:\n seen_numbers[item] = 'number in the list'\n \n print('there is no a pair of numbers that adds up to 10')\n \nlist1 = [4, 5, 7, 3]\nlist2 = [5, 7, 0, 6,5]\nlist3 = [9, 2, 8, 1, 3]\nlist4 = [-12, 4, -67, 2]\n\nprint(IsPairOf10(list2)) # should return 5\n\n",
"The following pair of number in array adds up to 10: 5 and 5\nNone\n"
],
[
"# Python program to check for the sum condition to be satisified \n \ndef hasArrayTwoCandidates(A, arr_size, sum): \n \n # sort the array \n quickSort(A, 0, arr_size-1) \n l = 0\n r = arr_size-1\n \n # traverse the array for the two elements \n while l<r: \n if (A[l] + A[r] == sum): \n return 1\n elif (A[l] + A[r] < sum): \n l += 1\n else: \n r -= 1\n return 0\n \n# Implementation of Quick Sort \n# A[] --> Array to be sorted \n# si --> Starting index \n# ei --> Ending index \ndef quickSort(A, si, ei): \n if si < ei: \n pi = partition(A, si, ei) \n quickSort(A, si, pi-1) \n quickSort(A, pi + 1, ei) \n \ndef partition(A, si, ei): \n x = A[ei] \n i = (si-1) \n for j in range(si, ei): \n if A[j] <= x: \n i += 1\n \n # This operation is used to swap two variables is python \n A[i], A[j] = A[j], A[i] \n \n A[i + 1], A[ei] = A[ei], A[i + 1] \n \n return i + 1\n \n\n \n# Driver program to test the functions \nA = [1, 4, 45, 6, 10, -8] \nn = 16\nif (hasArrayTwoCandidates(A, len(A), n)): \n print(\"Array has two elements with the given sum\") \nelse: \n print(\"Array doesn't have two elements with the given sum\") ",
"Array has two elements with the given sum\n"
],
[
"def printPairs(arr, arr_size, sum): \n # Create an empty hash set \n s = set() \n \n for i in range(0, arr_size): \n temp = sum-arr[i] \n if (temp in s): \n print (\"Pair with given sum \"+ str(sum) + \" is (\" + str(arr[i]) + \", \" + str(temp) + \")\")\n s.add(arr[i]) \n\n# driver program to check the above function \nA = [1, 4, 45, 6, 10, 8] \nn = 16\nprintPairs(A, len(A), n) ",
"Pair with given sum 16 is (10, 6)\n"
],
[
"# Given two unsorted arrays, find all pairs whose sum is x\n\n# Naiv approach\n\ndef naivSumOfPair(list1,list2,sum_x):\n count=0\n for i in arr1:\n for j in arr2:\n if(i+j==sum_x):\n count +=1\n print(i,' ',j)\n# print(count)\n return count\n\n# using Hash table\ndef sumOfPair(list1,list2,sum_x):\n list3=[]\n for i in list1:\n list3.append(i)\n for j in list2:\n if sum_x>j:\n if (sum_x-j) in list3:\n print(j,sum_x-j)\n \n return 1\n\n# arr1=[-1,-2,4,-6,5,7]\n# arr2=[6,3,4,0]\n\narr1 = [1, 0, -4, 7, 6, 4] \narr2 = [0, 2, 4, -3, 2, 1] \nsum_x=8\nprint(naivSumOfPair(arr1,arr2,sum_x))",
"7 1\n6 2\n6 2\n4 4\n4\n"
],
[
"# python program to count subarrays \n# having sum less than k. \n\n# Function to find number of subarrays \n# having sum less than k. \ndef countSubarray(arr, n, k): \n\tcount = 0\n\n\tfor i in range(0, n): \n\t\tsum = 0; \n\t\tfor j in range(i, n): \n\t\t\t\n\t\t\t# If sum is less than k \n\t\t\t# then update sum and \n\t\t\t# increment count \n\t\t\tif (sum + arr[j] < k): \n\t\t\t\tsum = arr[j] + sum\n\t\t\t\tcount+= 1\n\t\t\telse: \n\t\t\t\tbreak\n\treturn count; \n\n\n# Driver Code \narray = [1, 11, 2, 3, 15] \nk = 10\nsize = len(array) \ncount = countSubarray(array, size, k); \nprint(count) \n\n# This code is contributed by Sam007 \n",
"4\n"
],
[
"# Python 3 program to count subarrays \n# having sum less than k. \n\n# Function to find number of subarrays \n# having sum less than k. \ndef countSubarrays(arr, n, k): \n\n\tstart = 0\n\tend = 0\n\tcount = 0\n\tsum = arr[0] \n\n\twhile (start < n and end < n) : \n\n\t\t# If sum is less than k, move end \n\t\t# by one position. Update count and \n\t\t# sum accordingly. \n\t\tif (sum < k) : \n\t\t\tend += 1 \n\n\t\t\tif (end >= start): \n\t\t\t\tcount += end - start \n\n\t\t\t# For last element, end may become n \n\t\t\tif (end < n): \n\t\t\t\tsum += arr[end] \n\n\t\t# If sum is greater than or equal to k, \n\t\t# subtract arr[start] from sum and \n\t\t# decrease sliding window by moving \n\t\t# start by one position \n\t\telse : \n\t\t\tsum -= arr[start] \n\t\t\tstart += 1\n\n\treturn count \n\n# Driver Code \nif __name__ == \"__main__\": \n\t\n\tarray = [ 1, 11, 2, 3, 15 ] \n\tk = 10\n\tsize = len(array) \n\tprint(countSubarrays(array, size, k)) \n\n# This code is contributed by ita_c \n",
"4\n"
]
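,
[
"# Added, hedged sketch (not from the original notebook): counting ALL pairs\n# with a given sum, duplicates included, using collections.Counter. The\n# hash-set version above reports each distinct pair once; this single O(n)\n# pass counts multiplicities instead.\nfrom collections import Counter\n\ndef count_pairs_with_sum(arr, target):\n    freq = Counter()  # value -> occurrences seen so far (to the left)\n    count = 0\n    for x in arr:\n        count += freq[target - x]  # each earlier (target - x) pairs with x\n        freq[x] += 1\n    return count\n\n# expected: 3 pairs -> (1,5), (7,-1) and (1,5) again with the second 5\nprint(count_pairs_with_sum([1, 5, 7, -1, 5], 6))",
"_____no_output_____"
]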
],
[
[
"Number of subarrays having product less than K\nGiven an Array of positive numbers, calculate the number of possible contiguous subarrays having product lesser than a given number K.\n\nExamples :\n\nInput : arr[] = [1, 2, 3, 4] \n K = 10\nOutput : 7\nThe subarrays are {1}, {2}, {3}, {4}\n{1, 2}, {1, 2, 3} and {2, 3}\n\nInput : arr[] = [1, 9, 2, 8, 6, 4, 3] \n K = 100\nOutput : 16",
"_____no_output_____"
]
],
[
[
"# Number of subarrays having product less than K\n\n\n# Python3 program to count subarrays \n# having product less than k. \n\ndef countsubarray(array, n, k): \n\tcount = 0\n\tfor i in range(0, n): \n\t\t\n\t\t# Counter for single element \n\t\tif array[i] <= k: \n\t\t\tcount += 1\n\n\t\tmul = array[i] \n\n\t\tfor j in range(i + 1, n): \n\t\t\t\n\t\t\t# Multiple subarray \n\t\t\tmul = mul * array[j] \n\t\t\t\n\t\t\t# If this multiple is less \n\t\t\t# than k, then increment \n\t\t\tif mul <= k: \n\t\t\t\tcount += 1\n\t\t\telse: \n\t\t\t\tbreak\n\treturn count \n\n# Driver Code \narray = [ 1, 2, 3, 4 ] \nk = 10\nsize = len(array) \ncount = countsubarray(array, size, k); \nprint (count, end = \" \") \n\n# This code is contributed by Shreyanshi Arun. \n",
"7 "
]
],
[
[
" a = [5, 3, 2]\n k = 16\n \n counter = 0\n Window: [5]\n Product: 5\n\n 5 counter += 1+ (0-0)\n counter = 1\n Window: [5,3]\n Product: 15\n\n 15 counter += 1 + (1-0)\n counter = 3\n Window: [5,3,2]\n Product: 30\n\n 30 > 16 --> Adjust the left border\n New Window: [3,2]\n New Product: 6\n\n 6 counter += 1 + (2-1)\n counter = 5\n Answer: 5",
"_____no_output_____"
]
],
[
[
"# Python3 program to count \n# subarrays having product \n# less than k. \n\ndef countSubArrayProductLessThanK(a,k): \n\tn = len(a) \n\tp = 1\n\tres = 0\n\tstart = 0\n\tend = 0\n\twhile(end < n): \n\n\t\t# Move right bound by 1 \n\t\t# step. Update the product. \n\t\tp *= a[end] \n\t\t\n\t\t# Move left bound so guarantee \n\t\t# that p is again less than k. \n\t\twhile (start < end and p >= k): \n\t\t\tp =int(p//a[start]) \n\t\t\tstart+=1\n\t\t\n\t\t# If p is less than k, update \n\t\t# the counter. Note that this \n\t\t# is working even for (start == end): \n\t\t# it means that the previous \n\t\t# window cannot grow anymore \n\t\t# and a single array element \n\t\t# is the only addendum. \n\t\tif (p < k): \n\t\t\tl = end - start + 1\n\t\t\tres += l \n\t\t\n\t\tend+=1\n\n\treturn res \n\n\n# Driver Code \nif __name__=='__main__': \n\tprint(countSubArrayProductLessThanK([1, 2, 3, 4], 10)) \n\tprint(countSubArrayProductLessThanK([1, 9, 2, 8, 6, 4, 3], 100)) \n\tprint(countSubArrayProductLessThanK([5, 3, 2], 16)) \n\tprint(countSubArrayProductLessThanK([100, 200], 100)) \n\tprint(countSubArrayProductLessThanK([100, 200], 101)) \n\t\n# This code is contributed by mits \n",
"7\n16\n5\n0\n1\n"
],
[
"# Sliding Window technique\n\n# O(n) solution for finding \n# maximum sum of a subarray of size k \nimport sys \nINT_MIN = -sys.maxsize -1\n\ndef maxSum(arr, n, k): \n\n\t# n must be greater than k \n\tif not n > k: \n\t\tprint(\"Invalid\") \n\t\treturn -1\n\n\t# Compute sum of first window of size k \n\tmax_sum = INT_MIN \n\twindow_sum = sum([arr[i] for i in range(k)]) \n\n\t# Compute sums of remaining windows by \n\t# removing first element of previous \n\t# window and adding last element of \n\t# current window. \n\tfor i in range(n-k): \n\t\twindow_sum = window_sum - arr[i] + arr[i + k] \n\t\tmax_sum = max(window_sum, max_sum) \n\n\treturn max_sum \n\n# Driver code \narr = [1, 4, 2, 10, 2, 3, 1, 0, 20] \nk = 4\nn = len(arr) \nprint(maxSum(arr, n, k)) \n\n# This code is contributed by Kyle McClay \n\n",
"24\n"
]
]
] | [
"code",
"raw",
"code",
"raw",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"raw"
],
[
"code",
"code"
]
] |
d0d56251f3286f1db846d6cda668de9eec3f267f | 36,179 | ipynb | Jupyter Notebook | nbs/02_timestep.ipynb | qiz5/nbdev_qq_test | 300301a828d5bc60dd4200c168de2a5cd7ef5b8d | [
"Apache-2.0"
] | null | null | null | nbs/02_timestep.ipynb | qiz5/nbdev_qq_test | 300301a828d5bc60dd4200c168de2a5cd7ef5b8d | [
"Apache-2.0"
] | null | null | null | nbs/02_timestep.ipynb | qiz5/nbdev_qq_test | 300301a828d5bc60dd4200c168de2a5cd7ef5b8d | [
"Apache-2.0"
] | null | null | null | 37.106667 | 194 | 0.505652 | [
[
[
"# default_exp timestep",
"_____no_output_____"
],
[
"#hide\nimport sys\n[sys.path.append(i) for i in ['.', '..']]",
"_____no_output_____"
],
[
"#hide \nfrom nbdev.showdoc import *\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"#export\nfrom nbdev_qq_test.solution import *\nfrom nbdev_qq_test.initialize import calculate_HI_linear, calculate_HIGC\nfrom nbdev_qq_test.classes import *\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# timestep\n\n> run one timestep of model\n ",
"_____no_output_____"
]
],
[
[
"#export\ndef solution(InitCond,ParamStruct,ClockStruct,weather_step,Outputs):\n \"\"\"\n Function to perform AquaCrop-OS solution for a single time step\n \n \n \n *Arguments:*\\n\n \n `InitCond` : `InitCondClass` : containing current model paramaters\n\n `ClockStruct` : `ClockStructClass` : model time paramaters\n\n `weather_step`: `np.array` : containing P,ET,Tmax,Tmin for current day\n\n `Outputs` : `OutputClass` : object to store outputs\n \n *Returns:*\n \n `NewCond` : `InitCondClass` : containing updated model paramaters\n \n `Outputs` : `OutputClass` : object to store outputs\n\n \n \n \"\"\"\n \n \n # Unpack structures \n Soil = ParamStruct.Soil\n CO2 = ParamStruct.CO2\n if ParamStruct.WaterTable == 1:\n Groundwater = ParamStruct.zGW[ClockStruct.TimeStepCounter]\n else:\n Groundwater = 0\n\n\n\n P = weather_step[2]\n Tmax = weather_step[1]\n Tmin = weather_step[0]\n Et0 = weather_step[3]\n \n\n \n # Store initial conditions in structure for updating %%\n NewCond = InitCond\n \n # Check if growing season is active on current time step %%\n if ClockStruct.SeasonCounter >= 0:\n # Check if in growing season\n CurrentDate = ClockStruct.StepStartTime\n PlantingDate = ClockStruct.PlantingDates[ClockStruct.SeasonCounter]\n HarvestDate = ClockStruct.HarvestDates[ClockStruct.SeasonCounter]\n \n\n\n if (PlantingDate <= CurrentDate) and \\\n (HarvestDate >= CurrentDate) and \\\n (NewCond.CropMature == False) and \\\n (NewCond.CropDead == False):\n GrowingSeason = True\n else:\n GrowingSeason = False\n\n # Assign crop, irrigation management, and field management structures\n Crop = ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter]\n Crop_Name = ParamStruct.CropChoices[ClockStruct.SeasonCounter]\n IrrMngt = ParamStruct.IrrMngt\n \n\n\n if GrowingSeason == True:\n FieldMngt = ParamStruct.FieldMngt\n else:\n FieldMngt = ParamStruct.FallowFieldMngt\n\n else:\n # Not yet reached start of first growing season\n GrowingSeason = False\n # Assign crop, irrigation management, and field management structures\n # Assign first crop as filler crop\n Crop = ParamStruct.Fallow_Crop\n Crop_Name = \"fallow\"\n\n Crop.Aer = 5; Crop.Zmin = 0.3\n IrrMngt = ParamStruct.FallowIrrMngt\n FieldMngt = ParamStruct.FallowFieldMngt\n \n \n\n\n # Increment time counters %%\n if GrowingSeason == True:\n # Calendar days after planting\n NewCond.DAP = NewCond.DAP+1\n # Growing degree days after planting\n \n GDD = growing_degree_day(Crop.GDDmethod,Crop.Tupp,Crop.Tbase,Tmax,Tmin)\n\n ## Update cumulative GDD counter ##\n NewCond.GDD = GDD\n NewCond.GDDcum = NewCond.GDDcum+GDD\n \n NewCond.GrowingSeason = True\n else:\n NewCond.GrowingSeason = False\n\n # Calendar days after planting\n NewCond.DAP = 0\n # Growing degree days after planting\n GDD = 0.3\n NewCond.GDDcum = 0\n\n \n # save current timestep counter\n NewCond.TimeStepCounter = ClockStruct.TimeStepCounter\n NewCond.P = weather_step[2]\n NewCond.Tmax = weather_step[1]\n NewCond.Tmin = weather_step[0]\n NewCond.Et0 = weather_step[3]\n \n \n # Run simulations %%\n # 1. Check for groundwater table\n NewCond,Soil.Profile = check_groundwater_table(ClockStruct.TimeStepCounter,Soil.Profile,\n NewCond,ParamStruct.WaterTable,Groundwater)\n\n # 2. Root development \n NewCond = root_development(Crop,Soil.Profile,NewCond,GDD,GrowingSeason,ParamStruct.WaterTable)\n\n # 3. Pre-irrigation\n NewCond, PreIrr = pre_irrigation(Soil.Profile,Crop,NewCond,GrowingSeason,IrrMngt)\n\n # 4. 
Drainage\n NewCond.th,DeepPerc,FluxOut = drainage(Soil.Profile,NewCond.th,NewCond.th_fc_Adj)\n\n # 5. Surface runoff\n Runoff,Infl,NewCond = rainfall_partition(P,NewCond,FieldMngt,\n Soil.CN, Soil.AdjCN, Soil.zCN, Soil.nComp,Soil.Profile)\n\n # 6. Irrigation\n NewCond, Irr = irrigation(NewCond,IrrMngt,Crop,Soil.Profile,Soil.zTop,GrowingSeason,P,Runoff)\n \n # 7. Infiltration\n NewCond,DeepPerc,RunoffTot,Infl,FluxOut = infiltration(Soil.Profile,NewCond,Infl,Irr,IrrMngt.AppEff,FieldMngt,\n FluxOut,DeepPerc,Runoff,GrowingSeason)\n # 8. Capillary Rise\n NewCond,CR = capillary_rise(Soil.Profile,Soil.nLayer,Soil.fshape_cr,NewCond,FluxOut,ParamStruct.WaterTable)\n\n # 9. Check germination\n NewCond = germination(NewCond,Soil.zGerm,Soil.Profile,Crop.GermThr,Crop.PlantMethod,GDD,GrowingSeason)\n \n # 10. Update growth stage\n NewCond = growth_stage(Crop,NewCond,GrowingSeason)\n\n \n # 11. Canopy cover development\n NewCond = canopy_cover(Crop,Soil.Profile,Soil.zTop,NewCond,GDD,Et0,GrowingSeason)\n \n\n # 12. Soil evaporation\n NewCond,Es,EsPot = soil_evaporation(ClockStruct.EvapTimeSteps,ClockStruct.SimOffSeason,ClockStruct.TimeStepCounter,\n Soil.EvapZmin,Soil.EvapZmax,Soil.Profile,Soil.REW,Soil.Kex,Soil.fwcc,Soil.fWrelExp,Soil.fevap,\n Crop.CalendarType,Crop.Senescence,\n IrrMngt.IrrMethod,IrrMngt.WetSurf,\n FieldMngt,\n NewCond,Et0,Infl,P,Irr,GrowingSeason)\n \n # 13. Crop transpiration\n Tr,TrPot_NS,TrPot,NewCond,IrrNet = transpiration(Soil.Profile,Soil.nComp,Soil.zTop,\n Crop,\n IrrMngt.IrrMethod,IrrMngt.NetIrrSMT,\n NewCond,Et0,CO2,GrowingSeason,GDD)\n \n \n # 14. Groundwater inflow\n NewCond,GwIn = groundwater_inflow(Soil.Profile,NewCond)\n\n \n # 15. Reference harvest index\n NewCond = HIref_current_day(NewCond,Crop,GrowingSeason)\n\n # 16. Biomass accumulation\n NewCond = biomass_accumulation(Crop,NewCond,Tr,TrPot_NS,Et0,GrowingSeason)\n \n\n # 17. Harvest index\n NewCond = harvest_index(Soil.Profile,Soil.zTop,\n Crop,\n NewCond,Et0,Tmax,Tmin,GrowingSeason)\n\n\n \n # 18. Crop yield\n if GrowingSeason == True:\n # Calculate crop yield (tonne/ha)\n NewCond.Y = (NewCond.B/100)*NewCond.HIadj\n #print( ClockStruct.TimeStepCounter,(NewCond.B/100),NewCond.HIadj)\n # Check if crop has reached maturity\n if ((Crop.CalendarType == 1) and (NewCond.DAP >= Crop.Maturity)) \\\n or ((Crop.CalendarType == 2) and (NewCond.GDDcum >= Crop.Maturity)):\n # Crop has reached maturity\n NewCond.CropMature = True\n\n elif GrowingSeason == False:\n # Crop yield is zero outside of growing season\n NewCond.Y = 0\n\n\n \n\n # 19. Root zone water\n Wr,_Dr,_TAW,_thRZ = root_zone_water(Soil.Profile,NewCond.Zroot,NewCond.th,Soil.zTop,float(Crop.Zmin),Crop.Aer)\n \n\n\n # 20. 
Update net irrigation to add any pre irrigation\n IrrNet = IrrNet+PreIrr\n NewCond.IrrNetCum = NewCond.IrrNetCum+PreIrr\n\n\n # Update model outputs %%\n row_day = ClockStruct.TimeStepCounter\n row_gs = ClockStruct.SeasonCounter\n\n\n # Irrigation\n if GrowingSeason == True:\n if IrrMngt.IrrMethod == 4:\n # Net irrigation\n IrrDay = IrrNet\n IrrTot = NewCond.IrrNetCum\n else:\n # Irrigation\n IrrDay = Irr\n IrrTot = NewCond.IrrCum\n\n else:\n IrrDay = 0\n IrrTot = 0\n\n NewCond.Depletion = _Dr.Rz\n NewCond.TAW = _TAW.Rz\n \n \n # Water contents\n Outputs.Water[row_day,:3] = np.array([ClockStruct.TimeStepCounter,GrowingSeason,NewCond.DAP])\n Outputs.Water[row_day,3:] = NewCond.th\n\n # Water fluxes\n Outputs.Flux[row_day,:] = [ClockStruct.TimeStepCounter,\\\n ClockStruct.SeasonCounter,NewCond.DAP,Wr,NewCond.zGW,\\\n NewCond.SurfaceStorage,IrrDay,\\\n Infl,Runoff,DeepPerc,CR,GwIn,Es,EsPot,Tr,P]\n\n # Crop growth\n Outputs.Growth[row_day,:] = [ClockStruct.TimeStepCounter,ClockStruct.SeasonCounter,NewCond.DAP,GDD,\\\n NewCond.GDDcum,NewCond.Zroot,\\\n NewCond.CC,NewCond.CC_NS,NewCond.B,\\\n NewCond.B_NS,NewCond.HI,NewCond.HIadj,\\\n NewCond.Y]\n\n # Final output (if at end of growing season) \n if ClockStruct.SeasonCounter > -1:\n if ((NewCond.CropMature == True) \\\n or (NewCond.CropDead == True) \\\n or (ClockStruct.HarvestDates[ClockStruct.SeasonCounter] == ClockStruct.StepEndTime )) \\\n and (NewCond.HarvestFlag == False):\n\n # Store final outputs\n Outputs.Final.loc[ClockStruct.SeasonCounter] = [ClockStruct.SeasonCounter,Crop_Name,\\\n ClockStruct.StepEndTime,ClockStruct.TimeStepCounter,\\\n NewCond.Y,IrrTot]\n\n # Set harvest flag\n NewCond.HarvestFlag = True\n\n\n\n return NewCond,ParamStruct,Outputs",
"_____no_output_____"
],
[
"#hide\nshow_doc(solution)",
"_____no_output_____"
],
[
"#export\ndef check_model_termination(ClockStruct,InitCond):\n \"\"\"\n Function to check and declare model termination\n\n \n *Arguments:*\\n\n \n `ClockStruct` : `ClockStructClass` : model time paramaters\n\n `InitCond` : `InitCondClass` : containing current model paramaters\n\n *Returns:*\n \n `ClockStruct` : `ClockStructClass` : updated clock paramaters\n\n \n \"\"\"\n\n ## Check if current time-step is the last\n CurrentTime = ClockStruct.StepEndTime\n if CurrentTime < ClockStruct.SimulationEndDate:\n ClockStruct.ModelTermination = False\n elif CurrentTime >= ClockStruct.SimulationEndDate:\n ClockStruct.ModelTermination = True\n \n\n ## Check if at the end of last growing season ##\n # Allow model to exit early if crop has reached maturity or died, and in\n # the last simulated growing season\n if (InitCond.HarvestFlag == True) \\\n and (ClockStruct.SeasonCounter == ClockStruct.nSeasons-1):\n \n ClockStruct.ModelTermination = True\n \n\n \n return ClockStruct",
"_____no_output_____"
],
[
"#hide\nshow_doc(check_model_termination)",
"_____no_output_____"
],
[
"#export\ndef reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather):\n\n \"\"\"\n Function to reset initial model conditions for start of growing\n season (when running model over multiple seasons) \n \n *Arguments:*\\n\n \n `ClockStruct` : `ClockStructClass` : model time paramaters\n\n `InitCond` : `InitCondClass` : containing current model paramaters\n\n `weather`: `np.array` : weather data for simulation period\n\n\n *Returns:*\n \n `InitCond` : `InitCondClass` : containing reset model paramaters\n\n \n \n \"\"\"\n\n ## Extract crop type ##\n CropType = ParamStruct.CropChoices[ClockStruct.SeasonCounter]\n\n ## Extract structures for updating ##\n Soil = ParamStruct.Soil\n Crop = ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter]\n FieldMngt = ParamStruct.FieldMngt\n CO2 = ParamStruct.CO2\n CO2_data = ParamStruct.CO2data\n\n ## Reset counters ##\n InitCond.AgeDays = 0\n InitCond.AgeDays_NS = 0\n InitCond.AerDays = 0\n InitCond.IrrCum = 0\n InitCond.DelayedGDDs = 0\n InitCond.DelayedCDs = 0\n InitCond.PctLagPhase = 0\n InitCond.tEarlySen = 0\n InitCond.GDDcum = 0\n InitCond.DaySubmerged = 0\n InitCond.IrrNetCum = 0\n InitCond.DAP = 0\n\n InitCond.AerDaysComp = np.zeros(int(Soil.nComp))\n\n ## Reset states ##\n # States\n InitCond.PreAdj = False\n InitCond.CropMature = False\n InitCond.CropDead = False\n InitCond.Germination = False\n InitCond.PrematSenes = False\n InitCond.HarvestFlag = False\n\n\n\n # Harvest index\n # HI\n InitCond.Stage = 1\n InitCond.Fpre = 1\n InitCond.Fpost = 1\n InitCond.fpost_dwn = 1\n InitCond.fpost_upp = 1\n\n InitCond.HIcor_Asum = 0\n InitCond.HIcor_Bsum = 0\n InitCond.Fpol = 0\n InitCond.sCor1 = 0\n InitCond.sCor2 = 0\n\n # Growth stage\n InitCond.GrowthStage = 0\n\n # Transpiration\n InitCond.TrRatio = 1\n\n # crop growth\n InitCond.rCor = 1\n\n InitCond.CC = 0\n InitCond.CCadj = 0\n InitCond.CC_NS = 0\n InitCond.CCadj_NS = 0\n InitCond.B = 0\n InitCond.B_NS = 0\n InitCond.HI = 0\n InitCond.HIadj = 0\n InitCond.CCxAct = 0\n InitCond.CCxAct_NS = 0\n InitCond.CCxW = 0\n InitCond.CCxW_NS = 0\n InitCond.CCxEarlySen = 0\n InitCond.CCprev = 0\n InitCond.ProtectedSeed = 0\n\n ## Update CO2 concentration ##\n # Get CO2 concentration\n \n \n if ParamStruct.CO2concAdj != None:\n CO2.CurrentConc = ParamStruct.CO2concAdj\n else:\n Yri = pd.DatetimeIndex([ClockStruct.StepStartTime]).year[0]\n CO2.CurrentConc = CO2_data.loc[Yri]\n # Get CO2 weighting factor for first year\n CO2conc = CO2.CurrentConc\n CO2ref = CO2.RefConc\n if CO2conc <= CO2ref:\n fw = 0\n else:\n if CO2conc >= 550:\n fw = 1\n else:\n fw = 1-((550-CO2conc)/(550-CO2ref))\n\n\n # Determine initial adjustment\n fCO2 = (CO2conc/CO2ref)/(1+(CO2conc-CO2ref)\\\n *((1-fw)*Crop.bsted+fw*((Crop.bsted*Crop.fsink)\\\n +(Crop.bface*(1-Crop.fsink)))))\n\n # Consider crop type\n if Crop.WP >= 40:\n # No correction for C4 crops\n ftype = 0\n elif Crop.WP <= 20:\n # Full correction for C3 crops\n ftype = 1\n else:\n ftype = (40-Crop.WP)/(40-20)\n\n # Total adjustment\n Crop.fCO2 = 1+ftype*(fCO2-1)\n \n \n ## Reset soil water conditions (if not running off-season) ##\n if ClockStruct.SimOffSeason==False:\n # Reset water content to starting conditions\n InitCond.th = InitCond.thini\n # Reset surface storage\n if (FieldMngt.Bunds) and (FieldMngt.zBund > 0.001):\n # Get initial storage between surface bunds\n InitCond.SurfaceStorage = min(FieldMngt.BundWater,FieldMngt.zBund)\n else:\n # No surface bunds\n InitCond.SurfaceStorage = 0\n\n\n ## Update crop parameters (if in GDD mode) ##\n if Crop.CalendarType 
== 2:\n # Extract weather data for upcoming growing season\n wdf = weather[weather[:,4]>=ClockStruct.PlantingDates[ClockStruct.SeasonCounter]]\n #wdf = wdf[wdf[:,4]<=ClockStruct.HarvestDates[ClockStruct.SeasonCounter]]\n Tmin = wdf[:,0]\n Tmax = wdf[:,1]\n\n # Calculate GDD's\n if Crop.GDDmethod == 1:\n Tmean = (Tmax+Tmin)/2\n Tmean[Tmean>Crop.Tupp] = Crop.Tupp\n Tmean[Tmean<Crop.Tbase] = Crop.Tbase\n GDD = Tmean-Crop.Tbase\n elif Crop.GDDmethod == 2:\n Tmax[Tmax>Crop.Tupp] = Crop.Tupp\n Tmax[Tmax<Crop.Tbase] = Crop.Tbase\n Tmin[Tmin>Crop.Tupp] = Crop.Tupp\n Tmin[Tmin<Crop.Tbase] = Crop.Tbase\n Tmean = (Tmax+Tmin)/2\n GDD = Tmean-Crop.Tbase\n elif Crop.GDDmethod == 3:\n Tmax[Tmax>Crop.Tupp] = Crop.Tupp\n Tmax[Tmax<Crop.Tbase] = Crop.Tbase\n Tmin[Tmin>Crop.Tupp] = Crop.Tupp\n Tmean = (Tmax+Tmin)/2\n Tmean[Tmean<Crop.Tbase] = Crop.Tbase\n GDD = Tmean-Crop.Tbase\n\n \n \n GDDcum = np.cumsum(GDD)\n\n assert GDDcum[-1] > Crop.Maturity, f\"not enough growing degree days in simulation ({GDDcum[-1]}) to reach maturity ({Crop.Maturity})\"\n\n Crop.MaturityCD = np.argmax((GDDcum>Crop.Maturity))+1\n \n assert Crop.MaturityCD < 365, \"crop will take longer than 1 year to mature\"\n\n \n \n \n # 1. GDD's from sowing to maximum canopy cover\n Crop.MaxCanopyCD = (GDDcum>Crop.MaxCanopy).argmax()+1\n # 2. GDD's from sowing to end of vegetative growth\n Crop.CanopyDevEndCD = (GDDcum>Crop.CanopyDevEnd).argmax()+1\n # 3. Calendar days from sowing to start of yield formation\n Crop.HIstartCD = (GDDcum>Crop.HIstart).argmax()+1\n # 4. Calendar days from sowing to end of yield formation\n Crop.HIendCD = (GDDcum>Crop.HIend).argmax()+1\n # 5. Duration of yield formation in calendar days\n Crop.YldFormCD = Crop.HIendCD-Crop.HIstartCD\n if Crop.CropType == 3:\n # 1. Calendar days from sowing to end of flowering\n FloweringEnd = (GDDcum>Crop.FloweringEnd).argmax()+1\n # 2. Duration of flowering in calendar days\n Crop.FloweringCD = FloweringEnd-Crop.HIstartCD\n else:\n Crop.FloweringCD = -999\n \n\n\n # Update harvest index growth coefficient\n Crop = calculate_HIGC(Crop)\n\n # Update day to switch to linear HI build-up\n if Crop.CropType == 3:\n # Determine linear switch point and HIGC rate for fruit/grain crops\n Crop = calculate_HI_linear(Crop)\n\n else:\n # No linear switch for leafy vegetable or root/tiber crops\n Crop.tLinSwitch = 0\n Crop.dHILinear = 0.\n\n\n\n ## Update global variables ##\n ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter] = Crop\n ParamStruct.CO2 = CO2\n\n return InitCond,ParamStruct",
"_____no_output_____"
],
[
"#hide\nshow_doc(reset_initial_conditions)",
"_____no_output_____"
],
[
"#export\ndef update_time(ClockStruct,InitCond,ParamStruct,Outputs,weather):\n \"\"\"\n Function to update current time in model\n \n *Arguments:*\\n\n \n `ClockStruct` : `ClockStructClass` : model time paramaters\n\n `InitCond` : `InitCondClass` : containing current model paramaters\n\n `weather`: `np.array` : weather data for simulation period\n\n\n *Returns:*\n \n `ClockStruct` : `ClockStructClass` : model time paramaters\n\n \n `InitCond` : `InitCondClass` : containing reset model paramaters\n\n \n \"\"\"\n ## Update time ##\n if ClockStruct.ModelTermination == False:\n if (InitCond.HarvestFlag == True) \\\n and ((ClockStruct.SimOffSeason==False)):\n # End of growing season has been reached and not simulating\n # off-season soil water balance. Advance time to the start of the\n # next growing season.\n # Check if in last growing season \n if ClockStruct.SeasonCounter < ClockStruct.nSeasons-1:\n # Update growing season counter\n ClockStruct.SeasonCounter = ClockStruct.SeasonCounter+1\n # Update time-step counter\n #ClockStruct.TimeSpan = pd.Series(ClockStruct.TimeSpan)\n ClockStruct.TimeStepCounter = ClockStruct.TimeSpan.get_loc(ClockStruct.PlantingDates[ClockStruct.SeasonCounter])\n # Update start time of time-step\n ClockStruct.StepStartTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter]\n # Update end time of time-step\n ClockStruct.StepEndTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter + 1]\n # Reset initial conditions for start of growing season\n InitCond,ParamStruct = reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather)\n\n else:\n # Simulation considers off-season, so progress by one time-step\n # (one day)\n # Time-step counter\n ClockStruct.TimeStepCounter = ClockStruct.TimeStepCounter+1\n # Start of time step (beginning of current day)\n #ClockStruct.TimeSpan = pd.Series(ClockStruct.TimeSpan)\n ClockStruct.StepStartTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter]\n # End of time step (beginning of next day)\n ClockStruct.StepEndTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter + 1]\n # Check if in last growing season\n if ClockStruct.SeasonCounter < ClockStruct.nSeasons-1:\n # Check if upcoming day is the start of a new growing season\n if ClockStruct.StepStartTime == ClockStruct.PlantingDates[ClockStruct.SeasonCounter+1]:\n # Update growing season counter\n ClockStruct.SeasonCounter = ClockStruct.SeasonCounter+1\n # Reset initial conditions for start of growing season\n InitCond,ParamStruct = reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather)\n\n\n elif ClockStruct.ModelTermination == True:\n ClockStruct.StepStartTime = ClockStruct.StepEndTime\n ClockStruct.StepEndTime = ClockStruct.StepEndTime + np.timedelta64(1, 'D')\n \n Outputs.Flux = pd.DataFrame(Outputs.Flux, columns=[\"TimeStepCounter\",\\\n \"SeasonCounter\",\"DAP\",\"Wr\",\"zGW\",\\\n \"SurfaceStorage\",\"IrrDay\",\\\n \"Infl\",\"Runoff\",\"DeepPerc\",\"CR\",\\\n \"GwIn\",\"Es\",\"EsPot\",\"Tr\",\"P\"])\n \n \n Outputs.Water =pd.DataFrame(Outputs.Water, columns=[\"TimeStepCounter\",\"GrowingSeason\",\"DAP\"]\\\n +['th'+str(i) for i in range(1,Outputs.Water.shape[1]-2)])\n \n Outputs.Growth = pd.DataFrame(Outputs.Growth, columns = [\"TimeStepCounter\",'SeasonCounter',\"DAP\",'GDD',\\\n 'GDDcum','Zroot',\\\n 'CC','CC_NS','B',\\\n 'B_NS','HI','HIadj',\\\n 'Y'])\n \n \n return ClockStruct,InitCond,ParamStruct,Outputs\n\n\n\n",
"_____no_output_____"
],
[
"#hide\nshow_doc(update_time)",
"_____no_output_____"
],
[
"#hide\nfrom nbdev.export import notebook2script\nnotebook2script()",
"Converted 00_core.ipynb.\nConverted 01_initialize.ipynb.\nConverted 02_timestep.ipynb.\nConverted 03_solution.ipynb.\nConverted 04_classes.ipynb.\nConverted 05_comparison.ipynb.\nConverted index.ipynb.\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d5653379c8b535bbfd020de08abed5563426c9 | 522,043 | ipynb | Jupyter Notebook | P1.ipynb | ddonco/CarND-Finding-Lane-Lines | fc6b506d6b8d936f2135200d99b908edf397da60 | [
"MIT"
] | null | null | null | P1.ipynb | ddonco/CarND-Finding-Lane-Lines | fc6b506d6b8d936f2135200d99b908edf397da60 | [
"MIT"
] | null | null | null | P1.ipynb | ddonco/CarND-Finding-Lane-Lines | fc6b506d6b8d936f2135200d99b908edf397da60 | [
"MIT"
] | null | null | null | 621.479762 | 246,568 | 0.944091 | [
[
[
"# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---",
"_____no_output_____"
],
[
"**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>",
"_____no_output_____"
],
[
"**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** ",
"_____no_output_____"
],
[
"## Import Packages",
"_____no_output_____"
]
],
[
[
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport math\nimport numpy as np\nimport cv2\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Read in an Image",
"_____no_output_____"
]
],
[
[
"#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.figure(figsize=(10,6))\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')",
"This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n"
]
],
[
[
"## Ideas for Lane Detection Pipeline",
"_____no_output_____"
],
[
"**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**",
"_____no_output_____"
],
[
"## Helper Functions",
"_____no_output_____"
],
[
"Below are some helper functions to help get you started. They should look familiar from the lesson!",
"_____no_output_____"
]
],
[
[
"import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\ndef remove_outliers(data, m=1):\n if len(data) <= 2:\n return data\n return data[abs(data - np.mean(data)) < m * np.std(data)]\n\ndef draw_label(img, text, pos, bg_color):\n font_face = cv2.FONT_HERSHEY_SIMPLEX\n scale = 1\n color = (0, 0, 0)\n thickness = cv2.FILLED\n margin = 2\n\n txt_size = cv2.getTextSize(text, font_face, scale, thickness)\n\n end_x = pos[0] + txt_size[0][0] + margin\n end_y = pos[1] - txt_size[0][1] - margin\n\n cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)\n cv2.putText(img, text, pos, font_face, scale, color, 4, cv2.LINE_AA)\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. 
\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n right_x = []\n right_y = []\n left_x = []\n left_y = []\n \n for line in lines:\n for x1,y1,x2,y2 in line:\n slope = float((y2 - y1) / (x2 - x1))\n if slope > 0.45:\n left_x.extend([x1, x2])\n left_y.extend([y1, y2])\n \n if slope < -0.45:\n right_x.extend([x1, x2])\n right_y.extend([y1, y2])\n \n# cv2.line(img, (x1, y1), (x2, y2), [0,0,255], thickness)\n \n if len(right_x) > 0 and len(left_x) > 0 and len(right_y) > 0 and len(left_y) > 0:\n right_line_coeffs = np.polyfit(right_x, right_y, 1)\n left_line_coeffs = np.polyfit(left_x, left_y, 1)\n \n# draw_label(img, \"R({:.2f}, {:.2f})\".format(left_line_coeffs[0], left_line_coeffs[1]), (600,320), (255,255,255))\n# draw_label(img, \"L({:.2f}, {:.2f})\".format(right_line_coeffs[0], right_line_coeffs[1]), (150,320), (255,255,255))\n# print(\"right slope, int: ({:.2f}, {:.2f}) left slope, int: ({:.2f}, {:.2f})\".format(mean_right_slope, mean_right_yint, mean_left_slope, mean_left_yint))\n cv2.line(img, \n (int((320 - right_line_coeffs[1]) / right_line_coeffs[0]), 320), \n (int((540 - right_line_coeffs[1]) / right_line_coeffs[0]), 540), \n color, \n thickness)\n cv2.line(img, \n (int((320 - left_line_coeffs[1]) / left_line_coeffs[0]), 320), \n (int((540 - left_line_coeffs[1]) / left_line_coeffs[0]), 540), \n color, \n thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines, thickness=10)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)",
"_____no_output_____"
]
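,
[
"# Added, hedged sketch (not part of the original pipeline): one way the\n# cv2.inRange() / cv2.bitwise_and() helpers listed above can be combined for\n# color selection. Assumes 'image' is the RGB test image loaded earlier; the\n# HLS thresholds are illustrative guesses, not tuned values from this project.\ndef select_white_yellow(rgb_img):\n    hls = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2HLS)\n    # rough white mask: any hue, high lightness\n    white = cv2.inRange(hls, np.uint8([0, 200, 0]), np.uint8([255, 255, 255]))\n    # rough yellow mask: hue band around yellow, moderate saturation\n    yellow = cv2.inRange(hls, np.uint8([10, 0, 100]), np.uint8([40, 255, 255]))\n    mask = cv2.bitwise_or(white, yellow)\n    return cv2.bitwise_and(rgb_img, rgb_img, mask=mask)\n\n# plt.imshow(select_white_yellow(image))",
"_____no_output_____"
]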
],
[
[
"## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**",
"_____no_output_____"
]
],
[
[
"import os\nos.listdir(\"test_images/\")",
"_____no_output_____"
]
],
[
[
"## Build a Lane Finding Pipeline\n\n",
"_____no_output_____"
],
[
"Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.",
"_____no_output_____"
]
],
[
[
"# Read in and grayscale the image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\ngray = grayscale(image)\n\n# Define a kernel size and apply Gaussian smoothing\nkernel_size = 5\nblur_gray = gaussian_blur(gray, kernel_size)\n\n# Define our parameters for Canny and apply\nlow_threshold = 55\nhigh_threshold = 175\nedges = canny(blur_gray, low_threshold, high_threshold) \n\n# This time we are defining a four sided polygon to mask\nimshape = image.shape\nvertices = np.array([[(0,imshape[0]),(imshape[1]*0.47, imshape[0]*0.6), (imshape[1]*0.51, imshape[0]*0.6), (imshape[1],imshape[0])]], dtype=np.int32)\nmasked_edges = region_of_interest(edges, vertices)\n\nprint(imshape[0], imshape[1])\n\n# Define the Hough transform parameters\n# Make a blank the same size as our image to draw on\nrho = 1 # distance resolution in pixels of the Hough grid\ntheta = np.pi/180 # angular resolution in radians of the Hough grid\nthreshold = 10 # minimum number of votes (intersections in Hough grid cell)\nmin_line_length = 20 #minimum number of pixels making up a line\nmax_line_gap = 20 # maximum gap in pixels between connectable line segments\nline_image = np.copy(image)*0 # creating a blank to draw lines on\n\n# Run Hough on edge detected image\nline_image = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)\n\n# Draw the lines on the edge image\nlines_edges = weighted_img(line_image, image, α=0.8, β=1., γ=0.) \nplt.figure(figsize=(10,6))\nplt.imshow(lines_edges)",
"540 960\n"
]
],
[
[
"## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**",
"_____no_output_____"
]
],
[
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n\n gray = grayscale(image)\n\n # Define a kernel size and apply Gaussian smoothing\n kernel_size = 5\n blur_gray = gaussian_blur(gray, kernel_size)\n\n # Define our parameters for Canny and apply\n low_threshold = 55\n high_threshold = 175\n edges = canny(blur_gray, low_threshold, high_threshold) \n\n # This time we are defining a four sided polygon to mask\n imshape = image.shape\n vertices = np.array([[(0,imshape[0]),(imshape[1]*0.47, imshape[0]*0.6), (imshape[1]*0.51, imshape[0]*0.6), (imshape[1],imshape[0])]], dtype=np.int32)\n masked_edges = region_of_interest(edges, vertices)\n\n # Define the Hough transform parameters\n # Make a blank the same size as our image to draw on\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi/180 # angular resolution in radians of the Hough grid\n threshold = 10 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 20 #minimum number of pixels making up a line\n max_line_gap = 20 # maximum gap in pixels between connectable line segments\n\n # Run Hough on edge detected image\n line_image = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)\n\n # Draw the lines on the edge image\n result = weighted_img(line_image, image, α=0.8, β=1., γ=0.)\n \n return result",
"_____no_output_____"
]
],
[
[
"Let's try the one with the solid white lane on the right first ...",
"_____no_output_____"
]
],
[
[
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/solidWhiteRight.mp4\n[MoviePy] Writing video test_videos_output/solidWhiteRight.mp4\n"
]
],
[
[
"Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.",
"_____no_output_____"
]
],
[
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))",
"_____no_output_____"
]
],
[
[
"## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**",
"_____no_output_____"
],
[
"Now for the one with the solid yellow lane on the left. This one's more tricky!",
"_____no_output_____"
]
],
[
[
"yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/solidYellowLeft.mp4\n[MoviePy] Writing video test_videos_output/solidYellowLeft.mp4\n"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))",
"_____no_output_____"
]
],
[
[
"## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n",
"_____no_output_____"
],
[
"## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!",
"_____no_output_____"
]
],
[
[
"challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)",
"[MoviePy] >>>> Building video test_videos_output/challenge.mp4\n[MoviePy] Writing video test_videos_output/challenge.mp4\n"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
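"code",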
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
d0d5724dc3c439151655451ee3fa5789a1c14918 | 9,329 | ipynb | Jupyter Notebook | notebooks/draft.ipynb | bsronald2/XRay_Object_Detection_DL | 3bc1173913bffd1f12fd7816da80b02525055aeb | [
"MIT"
] | null | null | null | notebooks/draft.ipynb | bsronald2/XRay_Object_Detection_DL | 3bc1173913bffd1f12fd7816da80b02525055aeb | [
"MIT"
] | null | null | null | notebooks/draft.ipynb | bsronald2/XRay_Object_Detection_DL | 3bc1173913bffd1f12fd7816da80b02525055aeb | [
"MIT"
] | null | null | null | 33.08156 | 96 | 0.524922 | [
[
[
"# Set in root_directory\n%cd /home/ronald/PycharmProjects/x-ray-deep-learning/X-ray_Object_Detection/\n#%ls \n# libs \nimport numpy as np \nfrom pathlib import Path\nimport json\n\nnp.random.seed(1)\n\n# Directories\nROOT = Path('data/raw')\nimages_path = ROOT / 'images'\nann_path = ROOT/ 'annotation'\nprint('Images Path:', images_path)\nprint('Annotation Path:', ann_path)\n\n# Labels/n_classes\nlabels = ['gun'] #, 'knife', 'shuriken', 'razor_blade']\nn_classes = len(labels) + 1 # count background\n\n# Image Dimenssions\ndim = (256, 256, 1)\n\n# Collect all files absolute Path \nimgs_paths = sorted([i.absolute() for i in images_path.glob(\"*.png\") if i.is_file()])\n\nindexes = np.arange(len(imgs_paths))\n\nbatch_size = 4\nindex = 15\n# Set batch indexes\n# if index 0 and batch 4 in range(0, 17) retrieve values [0 1 2 3]\n# if index 1 and batch 4 in range(0, 17) retrieve values [4 5 6 7]\n#indexes = indexes[index * batch_size:(index + 1) * batch_size]\n\n#imgs_paths = [imgs_paths[index] for index in indexes]\nimgs_name = [img.name for img in imgs_paths]\n\n# Create empty data-set.\nX = np.empty((batch_size, *dim), dtype=np.float32)\ny = np.empty((batch_size, dim[0], dim[1], n_classes), dtype=np.float32)",
"/home/ronald/PycharmProjects/x-ray-deep-learning/X-ray_Object_Detection\nImages Path: data/raw/images\nAnnotation Path: data/raw/annotation\n"
],
[
"# Open imgs annotations\nwith open(\"data/raw/annotation/coco_annotation.json\", \"r\") as read_it: \n ann_data = json.load(read_it)\n \n",
"_____no_output_____"
],
[
"import cv2\nimport numpy as np\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables import Keypoint, KeypointsOnImage\n%matplotlib inline\nfrom matplotlib import pyplot as plt\ndict_imgs = ann_data.get('images')\ndict_ann = ann_data.get('annotations')\ndict_cat = ann_data.get('categories')\n\nseq = iaa.Sequential([\n iaa.Fliplr(0.5),# horizontal flips\n # Small gaussian blur with random sigma between 0 and 0.5.\n iaa.GaussianBlur(sigma=(0, 0.5)), \n # Crop image with random from 0 to 10% \n # But we only crop about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.Crop(percent=(0, 0.1), keep_size=True)),\n # Strengthen or weaken the contrast in each images.\n iaa.LinearContrast((0.75, 1)),\n\n # Add gaussian noise.\n # For 30% of all images, we sample the noise once per pixel.\n # For the other 30% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.3),\n\n # Apply affine transformations to each images.\n # Scale/zoom them.\n iaa.Affine(\n scale={\"x\": (1.0, 1.1), \"y\": (1.0, 1.1)})\n], random_order=True) # apply augmenters in random order\n ",
"_____no_output_____"
],
[
"def search_array(array, key, value):\n return next((obj for obj in array if obj[key] == value), None) # return object\n\ndef get_img_seg_kps(img_seg):\n points = list()\n for i in range(0, len(img_seg), 2): # iterate every two steps\n chunk = img_seg[i:i+2]\n points.append(Keypoint(x=chunk[0], y=chunk[1]))\n \n return points\n\ndef get_img_info(img_name):\n \"\"\"\n return img_label and segmentation points of the image\n \"\"\"\n img_seg, label = None, None\n img_obj = search_array(dict_imgs, 'file_name', img_name)\n if img_obj is not None:\n ann_obj = search_array(dict_ann, 'image_id', str(img_obj['id']))\n if ann_obj is not None:\n kps = get_img_seg_kps(ann_obj['segmentation'])\n label = search_array(dict_cat, 'id', ann_obj['category_id'])\n return label['name'], kps\n else: # Create annotation for image\n kps = create_img_seg(img_obj)\n return 'background', kps\n \n return None\n\ndef create_img_seg(img_obj):\n height = img_obj['height']\n width = img_obj['width']\n points = [\n Keypoint(x=0, y=0),\n Keypoint(x=width-1, y=0),\n Keypoint(x=width-1, y=height-1),\n Keypoint(x=0, y=height-1)\n ]\n# print(points)\n return points\n\ndef get_augimg(img, img_info):\n label, points = img_info\n kps = KeypointsOnImage(points, shape=img.shape)\n if img.shape != dim:\n img = ia.imresize_single_image(img, dim[0:2])\n kps = kps.on(img)\n # Augment keypoints and images.\n seq_det = seq.to_deterministic()\n img_aug = seq_det.augment_images([img])[0]\n kps_aug = seq_det.augment_keypoints([kps])[0]\n# print(kps)\n# print(\"--------\\n\", kps_aug)\n# img_aug, kps_aug = seq(image=img, keypoints=kps)\n aug_points = [[kp.x, kp.y] for kp in kps_aug.keypoints]\n aug_points_dic = {'label': label, 'points': aug_points}\n# ia.imshow(np.hstack([\n# kps.draw_on_image(img, size=10),\n# kps_aug.draw_on_image(img_aug, size=10)]))\n\n return img_aug, aug_points_dic\n \ndef show(img):\n print(img.shape)\n plt.imshow(img)\n plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis\n plt.show()\n\ndef get_mask(img, imgaug_shape):\n blank = np.zeros(shape=(img.shape[0], img.shape[1]), dtype=np.float32)\n points = np.array(imgaug_shape['points'], dtype=np.int32)\n label = imgaug_shape['label']\n cv2.fillPoly(blank, [points], 255)\n blank = blank / 255.0\n# ia.imshow(img)\n# ia.imshow(blank)\n return np.expand_dims(blank, axis=2)\n\n \ndef data_generation(img_path):\n X = np.empty((batch_size, *dim), dtype=np.float32)\n y = np.empty((batch_size, dim[0], dim[1], n_classes), dtype=np.float32) \n \n # retrieve img in gray_scale as numpy\n img = cv2.imread(str(img_path), 0) # our images are gray_scale\n img = np.expand_dims(img, axis=2)\n# img = (img / 255.0).astype(np.float32)\n images = [np.copy(img) for _ in range(batch_size)]\n img_info = get_img_info(img_path.name)\n for i, image in enumerate(images):\n imgaug, imgaug_shape = get_augimg(img, img_info)\n imgaug_mask = get_mask(imgaug, imgaug_shape)\n print(imgaug.shape)\n print(imgaug_mask.shape)\n X[i,] = imgaug\n y[i,] = imgaug_mask\n \n return X, y\n \nimg_pol = data_generation(imgs_paths[index])\n#print(img_pol)",
"(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n(256, 256, 1)\n"
]
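,
[
"# Added, hedged sketch (not in the original draft): quick visual check of one\n# augmented batch produced by data_generation(). Assumes X holds the grayscale\n# images and y the matching masks, shaped as built above; for inspection only.\ndef show_batch(X, y, n=batch_size):\n    fig, axes = plt.subplots(2, n, figsize=(3 * n, 6))\n    for i in range(n):\n        axes[0, i].imshow(X[i, ..., 0], cmap='gray')\n        axes[0, i].set_title('aug image %d' % i)\n        axes[1, i].imshow(y[i, ..., 0], cmap='gray')\n        axes[1, i].set_title('mask %d' % i)\n        axes[0, i].axis('off')\n        axes[1, i].axis('off')\n    plt.show()\n\n# X_batch, y_batch = data_generation(imgs_paths[index])\n# show_batch(X_batch, y_batch)",
"_____no_output_____"
]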
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
d0d57e97b08b94e36c32d9528d89c220f6cf524e | 122,416 | ipynb | Jupyter Notebook | charpter7_erercise.ipynb | Avriliar/data_analysis | 968e353900c256bf8fc307081e85e085ab2e24d4 | [
"MIT"
] | null | null | null | charpter7_erercise.ipynb | Avriliar/data_analysis | 968e353900c256bf8fc307081e85e085ab2e24d4 | [
"MIT"
] | null | null | null | charpter7_erercise.ipynb | Avriliar/data_analysis | 968e353900c256bf8fc307081e85e085ab2e24d4 | [
"MIT"
] | null | null | null | 23.756258 | 356 | 0.353189 | [
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"string_data=pd.Series(['aardvark','artichoke',np.nan,'avocado'])",
"_____no_output_____"
],
[
"string_data",
"_____no_output_____"
],
[
"string_data.isnull()",
"_____no_output_____"
],
[
"string_data[0]=None",
"_____no_output_____"
],
[
"string_data.isnull()",
"_____no_output_____"
],
[
"from numpy import nan as NA",
"_____no_output_____"
],
[
"data=pd.Series([1,NA,3.5,NA,7])",
"_____no_output_____"
],
[
"data.dropna()",
"_____no_output_____"
],
[
"data[data.notnull()]",
"_____no_output_____"
],
[
"data=pd.DataFrame([[1.,6.5,3.],[1.,NA,NA],[NA,NA,NA],[NA,6.5,3.]])",
"_____no_output_____"
],
[
"cleaned=data.dropna()",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"cleaned",
"_____no_output_____"
],
[
"data.dropna(how='all')",
"_____no_output_____"
],
[
"data[4]=NA",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data.dropna(axis=1,how='all')",
"_____no_output_____"
],
[
"df=pd.DataFrame(np.random.randn(7,3))",
"_____no_output_____"
],
[
"df.iloc[:4,1]=NA",
"_____no_output_____"
],
[
"df.iloc[:2,2]=NA",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.dropna()",
"_____no_output_____"
],
[
"df.dropna(thresh=2)",
"_____no_output_____"
],
[
"df.fillna(0)",
"_____no_output_____"
],
[
"df.fillna({1:0.5,2:0})",
"_____no_output_____"
],
[
"_=df.fillna(0,inplace=True)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df=pd.DataFrame(np.random.randn(6,3))",
"_____no_output_____"
],
[
"df.iloc[2:,1]",
"_____no_output_____"
],
[
"df.iloc[4:,2]",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.fillna(method='ffill')",
"_____no_output_____"
],
[
"df.fillna(method='ffill',limit=2)",
"_____no_output_____"
],
[
"data=pd.Series([1.,NA,3.5,NA,7])",
"_____no_output_____"
],
[
"df.fillna(data.mean())",
"_____no_output_____"
],
[
"data=pd.DataFrame({'k1':['one','two']*3+['two'],'k2':[1,1,2,3,3,4,4]})",
"_____no_output_____"
],
[
"data.duplicated()",
"_____no_output_____"
],
[
"data.drop_duplicates()",
"_____no_output_____"
],
[
"data['v1']=range(7)",
"_____no_output_____"
],
[
"data.drop_duplicates(['k1'])",
"_____no_output_____"
],
[
"data.drop_duplicates(['k1','k2'],keep='last')",
"_____no_output_____"
],
[
"data = pd.DataFrame({'food': ['bacon', 'pulled pork', 'bacon',\n....: 'Pastrami', 'corned beef', 'Bacon',\n....: 'pastrami', 'honey ham', 'nova lox'],\n....: 'ounces': [4, 3, 12, 6, 7.5, 8, 3, 5, 6]})",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"meat_to_animal = {\n'bacon': 'pig',\n'pulled pork': 'pig',\n'pastrami': 'cow',\n'corned beef': 'cow',\n'honey ham': 'pig',\n'nova lox': 'salmon'\n}",
"_____no_output_____"
],
[
"lowercased = data['food'].str.lower()= data['food'].str.lower()",
"_____no_output_____"
],
[
"lowercased",
"_____no_output_____"
],
[
"data['animal'] = lowercased.map(meat_to_animal)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data = pd.Series([1., -999., 2., -999., -1000., 3.])",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data.replace([-999, -1000], np.nan)",
"_____no_output_____"
],
[
"data.replace([-999, -1000], [np.nan, 0])",
"_____no_output_____"
],
[
"data.replace({-999: np.nan, -1000: 0})",
"_____no_output_____"
],
[
"data = pd.DataFrame(np.arange(12).reshape((3, 4)),\n....: index=['Ohio', 'Colorado', 'New York'],\n....: columns=['one', 'two', 'three', 'four'])",
"_____no_output_____"
],
[
"transform = lambda x: x[:4].upper()",
"_____no_output_____"
],
[
"data.index.map(transform)",
"_____no_output_____"
],
[
"data.index = data.index.map(transform)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data.rename(index=str.title, columns=str.upper)",
"_____no_output_____"
],
[
"data.rename(index={'OHIO': 'INDIANA'},\n....: columns={'three': 'peekaboo'})",
"_____no_output_____"
],
[
"data.rename(index={'OHIO':'INDIANA'},inplace=True)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"ages=[20,22,25,27,21,23,37,31,61,45,41,32]",
"_____no_output_____"
],
[
"bins=[18,25,35,60,100]",
"_____no_output_____"
],
[
"cats=pd.cut(ages,bins)",
"_____no_output_____"
],
[
"cats",
"_____no_output_____"
],
[
"cats.codes",
"_____no_output_____"
],
[
"cats.categories",
"_____no_output_____"
],
[
"pd.value_counts(cats)",
"_____no_output_____"
],
[
"pd.value_counts(cats)",
"_____no_output_____"
],
[
"pd.cut(ages,[18,25,35,60,100],right=False)",
"_____no_output_____"
],
[
"group_names = ['Youth', 'YoungAdult', 'MiddleAged', 'Senior']",
"_____no_output_____"
],
[
"pd.cut(ages,bins,labels=group_names)",
"_____no_output_____"
],
[
"data=np.random.rand(20)",
"_____no_output_____"
],
[
"pd.cut(data,4,precision=2)",
"_____no_output_____"
],
[
"data=np.random.randn(1000)",
"_____no_output_____"
],
[
"cats=pd.qcut(data,4)",
"_____no_output_____"
],
[
"cats",
"_____no_output_____"
],
[
"pd.value_counts(cats)",
"_____no_output_____"
],
[
"pd.qcut(data,[0,0.1,0.5,0.9,1.])",
"_____no_output_____"
],
[
"data=pd.DataFrame(np.random.randn(1000,4))",
"_____no_output_____"
],
[
"data.describe()",
"_____no_output_____"
],
[
"col=data[2]",
"_____no_output_____"
],
[
"col[np.abs(col)>3]",
"_____no_output_____"
],
[
"data[(np.abs(data)>3).any(1)]",
"_____no_output_____"
],
[
"data[(np.abs(data)>3)]=np.sign(data)*3",
"_____no_output_____"
],
[
"data.describe()",
"_____no_output_____"
],
[
"np.sign(data).head()",
"_____no_output_____"
],
[
"df=pd.DataFrame(np.arange(5*4).reshape((5,4)))",
"_____no_output_____"
],
[
"sampler=np.random.permutation(5)",
"_____no_output_____"
],
[
"sampler",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.take(sampler)",
"_____no_output_____"
],
[
"df.sample(n=3)",
"_____no_output_____"
],
[
"choices=pd.Series([5,7,-1,6,4])",
"_____no_output_____"
],
[
"draws=choices.sample(n=10,replace=True)",
"_____no_output_____"
],
[
"draws",
"_____no_output_____"
],
[
"df = pd.DataFrame({'key': ['b', 'b', 'a', 'c', 'a', 'b'],\n.....: 'data1': range(6)})",
"_____no_output_____"
],
[
"pd.get_dummies(df['key'])",
"_____no_output_____"
],
[
"dummies=pd.get_dummies(df['key'],prefix='key')",
"_____no_output_____"
],
[
"df_with_dummy=df[['data1']].join(dummies)",
"_____no_output_____"
],
[
"df_with_dummy",
"_____no_output_____"
],
[
"mnames=['movie_id','title','genres']",
"_____no_output_____"
],
[
"movies = pd.read_table('datasets/movielens/movies.dat', sep='::',\n.....: header=None, names=mnames)",
"c:\\users\\zusi\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\ipykernel_launcher.py:2: FutureWarning: read_table is deprecated, use read_csv instead.\n \nc:\\users\\zusi\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\ipykernel_launcher.py:2: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support regex separators (separators > 1 char and different from '\\s+' are interpreted as regex); you can avoid this warning by specifying engine='python'.\n \n"
],
[
"movies[:10]",
"_____no_output_____"
],
[
"all_genres=[]",
"_____no_output_____"
],
[
"for x in movies.genres:all_genres.extend(x.split('|'))",
"_____no_output_____"
],
[
"genres=pd.unique(all_genres)",
"_____no_output_____"
],
[
"genres",
"_____no_output_____"
],
[
"zero_matrix=np.zeros((len(movies),len(genres)))",
"_____no_output_____"
],
[
"dummies=pd.DataFrame(zero_matrix,columns=genres)",
"_____no_output_____"
],
[
"gen=movies.genres[0]",
"_____no_output_____"
],
[
"gen.split('|')",
"_____no_output_____"
],
[
"dummies.columns.get_indexer(gen.split('|'))",
"_____no_output_____"
],
[
"for i, gen in enumerate(movies.genres):\n indices = dummies.columns.get_indexer(gen.split('|'))\n dummies.iloc[i, indices] = 1",
"_____no_output_____"
],
[
"movies_windic = movies.join(dummies.add_prefix('Genre_'))",
"_____no_output_____"
],
[
"movies_windic.iloc[0]",
"_____no_output_____"
],
[
"np.random.seed(12345)",
"_____no_output_____"
],
[
"values = np.random.rand(10)",
"_____no_output_____"
],
[
"values",
"_____no_output_____"
],
[
"bins = [0, 0.2, 0.4, 0.6, 0.8, 1]",
"_____no_output_____"
],
[
"pd.get_dummies(pd.cut(values, bins))",
"_____no_output_____"
],
[
"val = 'a,b, guido'",
"_____no_output_____"
],
[
"val.split(',')",
"_____no_output_____"
],
[
"pieces = [x.strip() for x in val.split(',')]",
"_____no_output_____"
],
[
"pieces ",
"_____no_output_____"
],
[
"first, second, third = pieces",
"_____no_output_____"
],
[
"first + '::' + second + '::' + third",
"_____no_output_____"
],
[
"'::'.join(pieces)",
"_____no_output_____"
],
[
"'guido' in val",
"_____no_output_____"
],
[
"val.index(',')",
"_____no_output_____"
],
[
"val.find(':')",
"_____no_output_____"
],
[
"val.index(':')",
"_____no_output_____"
],
[
"val.count(',')",
"_____no_output_____"
],
[
"val.replace(',', '::')",
"_____no_output_____"
],
[
"val.replace(',', '')",
"_____no_output_____"
],
[
"import re",
"_____no_output_____"
],
[
"text = \"foo bar\\t baz \\tqux\"",
"_____no_output_____"
],
[
"re.split('\\s+', text)",
"_____no_output_____"
],
[
"regex = re.compile('\\s+')",
"_____no_output_____"
],
[
"regex.split(text)",
"_____no_output_____"
],
[
"regex.findall(text)",
"_____no_output_____"
],
[
"text = \"\"\"Dave [email protected]\nSteve [email protected]\nRob [email protected]\nRyan [email protected]\n\"\"\"\npattern = r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}'\n# re.IGNORECASE makes the regex case-insensitive\nregex = re.compile(pattern, flags=re.IGNORECASE)",
"_____no_output_____"
],
[
"regex.findall(text)",
"_____no_output_____"
],
[
"m = regex.search(text)",
"_____no_output_____"
],
[
"m",
"_____no_output_____"
],
[
"text[m.start():m.end()]",
"_____no_output_____"
],
[
"print(regex.match(text))",
"None\n"
],
[
"print(regex.sub('REDACTED', text))",
"Dave REDACTED\nSteve REDACTED\nRob REDACTED\nRyan REDACTED\n\n"
],
[
"pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\\.([A-Z]{2,4})'",
"_____no_output_____"
],
[
"regex = re.compile(pattern, flags=re.IGNORECASE)",
"_____no_output_____"
],
[
"m = regex.match('[email protected]')",
"_____no_output_____"
],
[
"m.groups()",
"_____no_output_____"
],
[
"regex.findall(text)",
"_____no_output_____"
],
[
"print(regex.sub(r'Username: \\1, Domain: \\2, Suffix: \\3',text))\n",
"Dave Username: dave, Domain: google, Suffix: com\nSteve Username: steve, Domain: gmail, Suffix: com\nRob Username: rob, Domain: gmail, Suffix: com\nRyan Username: ryan, Domain: yahoo, Suffix: com\n\n"
],
[
"data = {'Dave': '[email protected]', 'Steve': '[email protected]',\n.....: 'Rob': '[email protected]', 'Wes': np.nan}",
"_____no_output_____"
],
[
"data = pd.Series(data)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data.isnull()",
"_____no_output_____"
],
[
"data.str.contains('gmail')",
"_____no_output_____"
],
[
"pattern",
"_____no_output_____"
],
[
"data.str.findall(pattern, flags=re.IGNORECASE)",
"_____no_output_____"
],
[
"matches = data.str.match(pattern, flags=re.IGNORECASE)",
"_____no_output_____"
],
[
"matches",
"_____no_output_____"
],
[
"matches.str.get(1)",
"_____no_output_____"
],
[
"matches.str[0]",
"_____no_output_____"
],
[
"data.str[:5]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d5840a4d4f78a3654e8c23f77e23900dd023fb | 43,122 | ipynb | Jupyter Notebook | doc/FeatureSelection/ml2cpp_SelectKBest_boston.ipynb | antoinecarme/ml2cpp | 2b241d44de00eafda620c2b605690276faf5f8fb | [
"BSD-3-Clause"
] | null | null | null | doc/FeatureSelection/ml2cpp_SelectKBest_boston.ipynb | antoinecarme/ml2cpp | 2b241d44de00eafda620c2b605690276faf5f8fb | [
"BSD-3-Clause"
] | 33 | 2020-09-13T09:55:01.000Z | 2022-01-06T11:53:55.000Z | doc/FeatureSelection/ml2cpp_SelectKBest_boston.ipynb | antoinecarme/ml2cpp | 2b241d44de00eafda620c2b605690276faf5f8fb | [
"BSD-3-Clause"
] | 1 | 2021-01-26T14:41:58.000Z | 2021-01-26T14:41:58.000Z | 32.717754 | 428 | 0.347502 | [
[
[
"# ML2CPP",
"_____no_output_____"
],
[
"## Preparing the dataset",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nimport numpy as np\nimport pandas as pd\n\nboston = datasets.load_boston()\n\ndef populate_table(tablename, feature_names):\n X = boston.data \n y = boston.target\n N = X.shape[0]\n y = y.reshape(N,1)\n k = np.arange(N).reshape(N, 1)\n k_X_y = np.concatenate((k, X, y) , axis=1)\n lTable=pd.DataFrame(k_X_y)\n # print(lTable.head())\n lTable.columns = ['idx'] + feature_names + ['TGT'];\n lTable['TGT'] = lTable['TGT'].apply(int)\n lTable['idx'] = lTable['idx'].apply(int)\n lTable.to_csv(tablename , float_format='%.14g')\n\n",
"_____no_output_____"
],
[
"metadata = {\"primary_key\" : \"KEY\",\n \"features\" : list(boston.feature_names),\n \"targets\" : [\"TGT\"],\n \"table\" : \"iris\"}",
"_____no_output_____"
],
[
"populate_table(\"/tmp/boston.csv\" , metadata[\"features\"])\n",
"_____no_output_____"
],
[
"df = pd.read_csv(\"/tmp/boston.csv\")\ndf.sample(12, random_state=1960)",
"_____no_output_____"
]
],
[
[
"## Training a Model",
"_____no_output_____"
]
],
[
[
"\n\n# train any scikit model on the iris dataset\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nclf = SelectKBest(chi2, k=5)\n\nclf.fit(df[metadata['features']].values, df[metadata['targets']].values)\n",
"_____no_output_____"
]
],
[
[
"## Deploying the Model",
"_____no_output_____"
]
],
[
[
"\ndef generate_cpp_for_model(model):\n import pickle, json, requests, base64\n b64_data = base64.b64encode(pickle.dumps(model)).decode('utf-8')\n # send the model th the web service\n json_data={\"Name\":\"model_cpp_sample\", \n \"PickleData\":b64_data , \n \"SQLDialect\":\"CPP\",\n \"FeatureNames\" : metadata['features']}\n r = requests.post(\"https://sklearn2sql.herokuapp.com/model\", json=json_data)\n content = r.json()\n lCPP = content[\"model\"][\"SQLGenrationResult\"][0][\"SQL\"]\n # print(lCPP);\n return lCPP\n\n\nlCPPCode = generate_cpp_for_model(clf);\n",
"_____no_output_____"
],
[
"print(lCPPCode)",
"namespace {\n\n\tstd::vector<std::string> get_input_names(){\n\t\tstd::vector<std::string> lFeatures = { \"Feature_0\", \"Feature_1\", \"Feature_2\", \"Feature_3\", \"Feature_4\", \"Feature_5\", \"Feature_6\", \"Feature_7\", \"Feature_8\", \"Feature_9\", \"Feature_10\", \"Feature_11\", \"Feature_12\" };\n\n\t\treturn lFeatures;\n\t}\n\n\tstd::vector<std::string> get_output_names(){\n\t\tstd::vector<std::string> lOutputs = { \"Feature_0\", \"Feature_1\", \"Feature_6\", \"Feature_9\", \"Feature_11\" };\n\n\t\treturn lOutputs;\n\t}\n\n\ttTable compute_features(std::any Feature_0, std::any Feature_1, std::any Feature_2, std::any Feature_3, std::any Feature_4, std::any Feature_5, std::any Feature_6, std::any Feature_7, std::any Feature_8, std::any Feature_9, std::any Feature_10, std::any Feature_11, std::any Feature_12) {\n\n\t\ttTable lTable;\n\n\t\tlTable[\"Feature_0\"] = { Feature_0 };\n\t\tlTable[\"Feature_1\"] = { Feature_1 };\n\t\tlTable[\"Feature_6\"] = { Feature_6 };\n\t\tlTable[\"Feature_9\"] = { Feature_9 };\n\t\tlTable[\"Feature_11\"] = { Feature_11 };\n\n\t\treturn lTable;\n\t}\n\n\ttTable compute_model_outputs_from_table( tTable const & iTable) {\n\t\ttTable lTable = compute_features(iTable.at(\"Feature_0\")[0], iTable.at(\"Feature_1\")[0], iTable.at(\"Feature_2\")[0], iTable.at(\"Feature_3\")[0], iTable.at(\"Feature_4\")[0], iTable.at(\"Feature_5\")[0], iTable.at(\"Feature_6\")[0], iTable.at(\"Feature_7\")[0], iTable.at(\"Feature_8\")[0], iTable.at(\"Feature_9\")[0], iTable.at(\"Feature_10\")[0], iTable.at(\"Feature_11\")[0], iTable.at(\"Feature_12\")[0]);\n\n\t\treturn lTable;\n\t}\n\n} // eof namespace \n\n"
],
[
" def write_text_to_file(iCPPCode, oCPPFile): \n with open(oCPPFile, \"w\") as text_file:\n text_file.write(iCPPCode)\n\n def add_cpp_main_function(iCPPCode, iCSVFile):\n lCPPCode = \"#include \\\"Generic.i\\\"\\n\\n\"\n lCPPCode = lCPPCode + iCPPCode\n lCPPCode = lCPPCode + \"\\tint main() {\\n\"\n lCPPCode = lCPPCode + \"\\t\\tscore_csv_file(\\\"\" + iCSVFile +\"\\\");\\n\"\n lCPPCode = lCPPCode + \"\\treturn 0;\\n}\\n\"\n return lCPPCode\n\n def compile_cpp_code_as_executable(iName):\n import subprocess\n lCommand = [\"g++\", \"-Wall\", \"-Wno-unused-function\", \"-std=c++17\" , \"-g\" , \"-o\", iName + \".exe\", iName + \".cpp\"]\n print(\"EXECUTING\" , \"'\" + \" \".join(lCommand) + \"'\")\n result = subprocess.check_output(lCommand)\n # print(result)\n\n def execute_cpp_model(iName, iCSVFile):\n import subprocess\n result2 = subprocess.check_output([iName + \".exe\", iCSVFile])\n result2 = result2.decode()\n print(result2[:100])\n print(result2[-100:])\n return result2\n \n def execute_cpp_code(iCPPCode, iCSVFile):\n lName = \"/tmp/sklearn2sql_cpp_\" + str(id(clf));\n lCPPCode = add_cpp_main_function(iCPPCode, iCSVFile)\n write_text_to_file(lCPPCode, lName + \".cpp\")\n compile_cpp_code_as_executable(lName)\n result = execute_cpp_model(lName, iCSVFile)\n write_text_to_file(str(result), lName + \".out\")\n return lName + \".out\"\n",
"_____no_output_____"
],
[
"populate_table(\"/tmp/boston2.csv\" , [\"Feature_\" + str(i) for i,x in enumerate(metadata[\"features\"])])\nlCPPOutput = execute_cpp_code(lCPPCode , \"/tmp/boston2.csv\")\ncpp_output = pd.read_csv(lCPPOutput)",
"EXECUTING 'g++ -Wall -Wno-unused-function -std=c++17 -g -o /tmp/sklearn2sql_cpp_140569063877312.exe /tmp/sklearn2sql_cpp_140569063877312.cpp'\nidx,Feature_0,Feature_1,Feature_6,Feature_9,Feature_11\n0,0.00632000000000,18.00000000000000,65.20000\n3,393.44999999999999\n505,0.04741000000000,0.00000000000000,80.80000000000000,273,396.89999999999998\n\n"
],
[
"cpp_output.sample(12, random_state=1960)",
"_____no_output_____"
],
[
"skl_outputs = pd.DataFrame()\nX = df[metadata['features']].values\nskl_output_key = pd.DataFrame(list(range(X.shape[0])), columns=['idx']);\n\nskl_output_transform = pd.DataFrame(clf.transform(X), columns=cpp_output.columns[1:]);\nskl_output = pd.concat([skl_output_key, skl_output_transform] , axis=1)\nskl_output.sample(12, random_state=1960)\n",
"_____no_output_____"
],
[
"cpp_skl_join = skl_output.join(cpp_output , how='left', on='idx', lsuffix='_skl', rsuffix='_cpp')",
"_____no_output_____"
],
[
"cpp_skl_join.sample(12, random_state=1960)",
"_____no_output_____"
],
[
"for col in cpp_output.columns:\n lDiff = cpp_skl_join[col + \"_skl\"] - cpp_skl_join[col + \"_cpp\"]\n print(lDiff.describe())\n ",
"count 506.0\nmean 0.0\nstd 0.0\nmin 0.0\n25% 0.0\n50% 0.0\n75% 0.0\nmax 0.0\ndtype: float64\ncount 5.060000e+02\nmean -3.471161e-17\nstd 6.703551e-16\nmin -1.421085e-14\n25% 0.000000e+00\n50% 0.000000e+00\n75% 0.000000e+00\nmax 1.776357e-15\ndtype: float64\ncount 506.0\nmean 0.0\nstd 0.0\nmin 0.0\n25% 0.0\n50% 0.0\n75% 0.0\nmax 0.0\ndtype: float64\ncount 5.060000e+02\nmean -3.089316e-16\nstd 4.324300e-15\nmin -1.421085e-14\n25% 0.000000e+00\n50% 0.000000e+00\n75% 0.000000e+00\nmax 1.421085e-14\ndtype: float64\ncount 506.0\nmean 0.0\nstd 0.0\nmin 0.0\n25% 0.0\n50% 0.0\n75% 0.0\nmax 0.0\ndtype: float64\ncount 5.060000e+02\nmean -2.527622e-16\nstd 3.623895e-15\nmin -5.684342e-14\n25% 0.000000e+00\n50% 0.000000e+00\n75% 0.000000e+00\nmax 0.000000e+00\ndtype: float64\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d5a99546c7f65097ed5f6ffa0dc09618732547 | 482,979 | ipynb | Jupyter Notebook | week10_interpretability/bonus_style_transfer/style_transfer_pytorch.ipynb | silviageiser/Practical_DL | f1161dfeb0ab06a013a954e405b069312edc2461 | [
"MIT"
] | 1 | 2022-02-08T18:53:48.000Z | 2022-02-08T18:53:48.000Z | week10_interpretability/bonus_style_transfer/style_transfer_pytorch.ipynb | silviageiser/Practical_DL | f1161dfeb0ab06a013a954e405b069312edc2461 | [
"MIT"
] | null | null | null | week10_interpretability/bonus_style_transfer/style_transfer_pytorch.ipynb | silviageiser/Practical_DL | f1161dfeb0ab06a013a954e405b069312edc2461 | [
"MIT"
] | null | null | null | 553.875 | 451,842 | 0.937432 | [
[
[
"### Neural style transfer in PyTorch\n\nThis tutorial implements the \"slow\" neural style transfer based on the VGG19 model.\n\nIt closely follows the official neural style tutorial you can find [here](http://pytorch.org/tutorials/advanced/neural_style_tutorial.html).\n\n__Note:__ if you didn't sit through the explanation of neural style transfer in the on-campus lecture, you're _strongly recommended_ to follow the link above instead of this notebook.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom matplotlib.pyplot import imread\nfrom skimage.transform import resize, rotate\n\nimport torch, torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# desired size of the output image\nimsize = 512 # REDUCE THIS TO 128 IF THE OPTIMIZATION IS TOO SLOW FOR YOU\ndef image_loader(image_name):\n image = resize(imread(image_name), [imsize, imsize])\n image = image.transpose([2,0,1]) / image.max()\n image = Variable(dtype(image))\n # fake batch dimension required to fit network's input dimensions\n image = image.unsqueeze(0)\n return image\n\nuse_cuda = torch.cuda.is_available()\n\nprint(\"torch\", torch.__version__)\nif use_cuda:\n print(\"Using GPU.\")\nelse:\n print(\"Not using GPU.\")\ndtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\n\n",
"torch 1.10.0+cu111\nUsing GPU.\n"
]
],
[
[
"### Draw input images",
"_____no_output_____"
]
],
[
[
"!mkdir -p images\n!wget https://github.com/yandexdataschool/Practical_DL/raw/fall21/week10_interpretability/bonus_style_transfer/images/wave.jpg -O images/wave.jpg\nstyle_img = image_loader(\"images/wave.jpg\").type(dtype)\n\n!wget http://cdn.cnn.com/cnnnext/dam/assets/170809210024-trump-nk.jpg -O images/my_img.jpg\ncontent_img = image_loader(\"images/my_img.jpg\").type(dtype)\n\nassert style_img.size() == content_img.size(), \\\n \"we need to import style and content images of the same size\"",
"--2021-11-26 17:42:59-- https://github.com/yandexdataschool/Practical_DL/raw/fall21/week10_interpretability/bonus_style_transfer/images/wave.jpg\nResolving github.com (github.com)... 140.82.121.4\nConnecting to github.com (github.com)|140.82.121.4|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/yandexdataschool/Practical_DL/fall21/week10_interpretability/bonus_style_transfer/images/wave.jpg [following]\n--2021-11-26 17:43:00-- https://raw.githubusercontent.com/yandexdataschool/Practical_DL/fall21/week10_interpretability/bonus_style_transfer/images/wave.jpg\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 123262 (120K) [image/jpeg]\nSaving to: ‘images/wave.jpg’\n\nimages/wave.jpg 100%[===================>] 120.37K --.-KB/s in 0.008s \n\n2021-11-26 17:43:00 (14.4 MB/s) - ‘images/wave.jpg’ saved [123262/123262]\n\n--2021-11-26 17:43:12-- http://cdn.cnn.com/cnnnext/dam/assets/170809210024-trump-nk.jpg\nResolving cdn.cnn.com (cdn.cnn.com)... 23.65.203.53, 2a02:26f0:9400:1b4::3134, 2a02:26f0:9400:1b5::3134\nConnecting to cdn.cnn.com (cdn.cnn.com)|23.65.203.53|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1230999 (1.2M) [image/jpeg]\nSaving to: ‘images/my_img.jpg’\n\nimages/my_img.jpg 100%[===================>] 1.17M --.-KB/s in 0.03s \n\n2021-11-26 17:43:13 (38.6 MB/s) - ‘images/my_img.jpg’ saved [1230999/1230999]\n\n"
],
[
"def imshow(tensor, title=None):\n image = tensor.clone().cpu() # we clone the tensor to not do changes on it\n image = image.view(3, imsize, imsize) # remove the fake batch dimension\n image = image.numpy().transpose([1,2,0])\n plt.imshow(image / np.max(image))\n if title is not None:\n plt.title(title)\n\nplt.figure(figsize=[12,6])\nplt.subplot(1,2,1)\nimshow(style_img.data, title='Style Image')\nplt.subplot(1,2,2)\nimshow(content_img.data, title='Content Image')",
"_____no_output_____"
]
],
[
[
"### Define Style Transfer Losses\n\nAs shown in the lecture, we define two loss functions: content and style losses.\n\nContent loss is simply a pointwise mean squared error of high-level features while style loss is the error between gram matrices of intermediate feature layers.\n\nTo obtain the feature representations we use a pre-trained VGG19 network.",
"_____no_output_____"
]
],
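For reference, the two losses that the modules below implement can be written compactly. This summary is an editorial addition (the standard Gatys et al. formulation): $F^l$ are the feature maps of layer $l$, $P^l$ the content target's features, $A^l$ the style target's Gram matrix, and $w_l$ the per-layer style weights.

```latex
\mathcal{L}_{\text{content}} = \mathrm{MSE}\bigl(F^l, P^l\bigr), \qquad
G^l_{ij} = \frac{1}{C_l H_l W_l} \sum_{k} F^l_{ik} F^l_{jk}, \qquad
\mathcal{L}_{\text{style}} = \sum_{l} w_l \, \mathrm{MSE}\bigl(G^l, A^l\bigr)
```

The $\frac{1}{C_l H_l W_l}$ normalization matches the `gram_matrix` helper in the code, which divides the Gram product by the number of elements in each feature map.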
[
[
"import torchvision.models as models\n\ncnn = models.vgg19(pretrained=True).features\n\n# move it to the GPU if possible:\nif use_cuda:\n cnn = cnn.cuda()",
"Downloading: \"https://download.pytorch.org/models/vgg19-dcbb9e9d.pth\" to /root/.cache/torch/hub/checkpoints/vgg19-dcbb9e9d.pth\n"
],
[
"class ContentLoss(nn.Module):\n\n def __init__(self, target, weight):\n super(ContentLoss, self).__init__()\n # we 'detach' the target content from the tree used\n self.target = target.detach() * weight\n self.weight = weight\n\n def forward(self, input):\n self.loss = F.mse_loss(input * self.weight, self.target)\n return input.clone()\n\n def backward(self, retain_graph=True):\n self.loss.backward(retain_graph=retain_graph)\n return self.loss",
"_____no_output_____"
],
[
"def gram_matrix(input):\n a, b, c, d = input.size() # a=batch size(=1)\n # b=number of feature maps\n # (c,d)=dimensions of a f. map (N=c*d)\n\n features = input.view(a * b, c * d) # resise F_XL into \\hat F_XL\n\n G = torch.mm(features, features.t()) # compute the gram product\n\n # we 'normalize' the values of the gram matrix\n # by dividing by the number of element in each feature maps.\n return G.div(a * b * c * d)\n \nclass StyleLoss(nn.Module):\n\n def __init__(self, target, weight):\n super(StyleLoss, self).__init__()\n self.target = target.detach() * weight\n self.weight = weight\n\n def forward(self, input):\n self.G = gram_matrix(input)\n self.G.mul_(self.weight)\n self.loss = F.mse_loss(self.G, self.target)\n return input.clone()\n\n def backward(self, retain_graph=True):\n self.loss.backward(retain_graph=retain_graph)\n return self.loss",
"_____no_output_____"
]
],
[
[
"### Style transfer pipeline\n\nWe can now define a unified \"model\" that computes all the losses on the image triplet (content image, style image, optimized image) so that we could optimize them with backprop (over image pixels).",
"_____no_output_____"
]
],
[
[
"content_weight=1 # coefficient for content loss\nstyle_weight=1000 # coefficient for style loss\ncontent_layers=('conv_4',) # use these layers for content loss\nstyle_layers=('conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5') # use these layers for style loss\n",
"_____no_output_____"
],
[
"content_losses = []\nstyle_losses = []\n\nmodel = nn.Sequential() # the new Sequential module network\n# move these modules to the GPU if possible:\nif use_cuda:\n model = model.cuda()\n\ni = 1\nfor layer in list(cnn):\n if isinstance(layer, nn.Conv2d):\n name = \"conv_\" + str(i)\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).clone()\n content_loss = ContentLoss(target, content_weight)\n model.add_module(\"content_loss_\" + str(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).clone()\n target_feature_gram = gram_matrix(target_feature)\n style_loss = StyleLoss(target_feature_gram, style_weight)\n model.add_module(\"style_loss_\" + str(i), style_loss)\n style_losses.append(style_loss)\n\n if isinstance(layer, nn.ReLU):\n name = \"relu_\" + str(i)\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).clone()\n content_loss = ContentLoss(target, content_weight)\n model.add_module(\"content_loss_\" + str(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).clone()\n target_feature_gram = gram_matrix(target_feature)\n style_loss = StyleLoss(target_feature_gram, style_weight)\n model.add_module(\"style_loss_\" + str(i), style_loss)\n style_losses.append(style_loss)\n\n i += 1\n\n if isinstance(layer, nn.MaxPool2d):\n name = \"pool_\" + str(i)\n model.add_module(name, layer) # ***",
"_____no_output_____"
]
],
[
[
"### Optimization\n\nWe can now optimize both style and content loss over input image.",
"_____no_output_____"
]
],
[
[
"input_image = Variable(content_img.clone().data, requires_grad=True)\noptimizer = torch.optim.Adam([input_image], lr=0.1)",
"_____no_output_____"
],
[
"num_steps = 300\n\nfor i in range(num_steps):\n # correct the values of updated input image\n input_image.data.clamp_(0, 1)\n\n model(input_image)\n style_score = 0\n content_score = 0\n for sl in style_losses:\n style_score += sl.backward()\n for cl in content_losses:\n content_score += cl.backward()\n \n if i % 10 == 0: # <--- adjust the value to see updates more frequently\n \n print('Step # {} Style Loss : {:4f} Content Loss: {:4f}'.format(\n i, style_score.data.item(), content_score.item()))\n plt.figure(figsize=[10,10])\n imshow(input_image.data)\n plt.show()\n \n loss = style_score + content_score\n \n optimizer.step(lambda: loss)\n optimizer.zero_grad()\n \n# a last correction...\ninput_image.data.clamp_(0, 1)",
"_____no_output_____"
]
],
[
[
"### Final image",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=[10,10])\nimshow(input_image.data)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d5b42bb3e30fe205fde4ecb7fcde4481baa181 | 90,629 | ipynb | Jupyter Notebook | notebooks/chapter_06/05 - Sequence processing with convnets.ipynb | lucone83/deep-learning-with-python | a1aa11aa0b493e862664222550fd0734c30afa11 | [
"MIT"
] | 39 | 2020-05-11T01:13:15.000Z | 2022-02-02T14:21:59.000Z | notebooks/chapter_06/05 - Sequence processing with convnets.ipynb | Juan-glitch/deep-learning-with-python | a1aa11aa0b493e862664222550fd0734c30afa11 | [
"MIT"
] | null | null | null | notebooks/chapter_06/05 - Sequence processing with convnets.ipynb | Juan-glitch/deep-learning-with-python | a1aa11aa0b493e862664222550fd0734c30afa11 | [
"MIT"
] | 18 | 2020-05-12T12:17:11.000Z | 2022-01-06T18:24:05.000Z | 131.728198 | 16,732 | 0.832228 | [
[
[
"## Implementing a 1D convnet\n\nIn Keras, you would use a 1D convnet via the `Conv1D` layer, which has a very similar interface to `Conv2D`. It **takes as input 3D tensors with shape (samples, time, features) and also returns similarly-shaped 3D tensors**. The convolution window is a 1D window on the temporal axis, axis 1 in the input tensor.\n\nLet's build a simple 2-layer 1D convnet and apply it to the IMDB sentiment classification task altready seen previously.",
"_____no_output_____"
]
],
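As a quick aside, the `(samples, time, features)` convention is easy to verify with random data. This is a hypothetical standalone snippet, not a cell from the original notebook:

```python
import numpy as np
from tensorflow.keras import layers

# 4 sequences, 500 timesteps, 128 features per timestep
x = np.random.rand(4, 500, 128).astype("float32")

# The convolution window slides along the time axis only
y = layers.Conv1D(32, 7, activation="relu")(x)

# 500 - 7 + 1 = 494 timesteps remain, each with 32 output channels
print(y.shape)  # (4, 494, 32)
```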
[
[
"from tensorflow.keras.datasets import imdb\nfrom tensorflow.keras.preprocessing import sequence\n\nmax_features = 10000 # number of words to consider as features\nmax_len = 500 # cut texts after this number of words (among top max_features most common words)\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=max_len)\nx_test = sequence.pad_sequences(x_test, maxlen=max_len)\n\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)",
"Loading data...\n25000 train sequences\n25000 test sequences\nPad sequences (samples x time)\nx_train shape: (25000, 500)\nx_test shape: (25000, 500)\n"
]
],
[
[
"**1D convnets are structured in the same way as their 2D counter-parts**: they consist of a stack of `Conv1D` and `MaxPooling1D layers`, eventually ending in either a global pooling layer or a `Flatten` layer, turning the 3D outputs into 2D outputs, allowing to add one or more Dense layers to the model, for classification or regression.\n\nOne difference, though, is the fact that **we can afford to use larger convolution windows with 1D convnets**. Indeed, with a 2D convolution layer, a 3x3 convolution window contains `3*3 = 9` feature vectors, but with a 1D convolution layer, a convolution window of size 3 would only contain 3 feature vectors. We can thus easily afford 1D convolution windows of size 7 or 9.\n\nThis is our example 1D convnet for the IMDB dataset:",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.Embedding(max_features, 128, input_length=max_len))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\nmodel.add(layers.MaxPooling1D(5))\nmodel.add(layers.Conv1D(32, 7, activation='relu'))\nmodel.add(layers.GlobalMaxPooling1D())\nmodel.add(layers.Dense(1))\n\nmodel.summary()\n\nmodel.compile(\n optimizer=RMSprop(lr=1e-4),\n loss='binary_crossentropy',\n metrics=['acc']\n)\n\nhistory = model.fit(\n x_train, \n y_train,\n epochs=10,\n batch_size=128,\n validation_split=0.2\n)",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, 500, 128) 1280000 \n_________________________________________________________________\nconv1d (Conv1D) (None, 494, 32) 28704 \n_________________________________________________________________\nmax_pooling1d (MaxPooling1D) (None, 98, 32) 0 \n_________________________________________________________________\nconv1d_1 (Conv1D) (None, 92, 32) 7200 \n_________________________________________________________________\nglobal_max_pooling1d (Global (None, 32) 0 \n_________________________________________________________________\ndense (Dense) (None, 1) 33 \n=================================================================\nTotal params: 1,315,937\nTrainable params: 1,315,937\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 20000 samples, validate on 5000 samples\nEpoch 1/10\n20000/20000 [==============================] - 23s 1ms/sample - loss: 0.7036 - acc: 0.5314 - val_loss: 0.6854 - val_acc: 0.5668\nEpoch 2/10\n20000/20000 [==============================] - 24s 1ms/sample - loss: 0.6666 - acc: 0.6873 - val_loss: 0.6678 - val_acc: 0.6430\nEpoch 3/10\n20000/20000 [==============================] - 27s 1ms/sample - loss: 0.6296 - acc: 0.7703 - val_loss: 0.6266 - val_acc: 0.7372\nEpoch 4/10\n20000/20000 [==============================] - 25s 1ms/sample - loss: 0.5567 - acc: 0.8093 - val_loss: 0.5301 - val_acc: 0.7748\nEpoch 5/10\n20000/20000 [==============================] - 24s 1ms/sample - loss: 0.4419 - acc: 0.8374 - val_loss: 0.4356 - val_acc: 0.8298\nEpoch 6/10\n20000/20000 [==============================] - 27s 1ms/sample - loss: 0.3591 - acc: 0.8662 - val_loss: 0.4390 - val_acc: 0.8352\nEpoch 7/10\n20000/20000 [==============================] - 26s 1ms/sample - loss: 0.3091 - acc: 0.8888 - val_loss: 0.4064 - val_acc: 0.8592\nEpoch 8/10\n20000/20000 [==============================] - 27s 1ms/sample - loss: 0.2739 - acc: 0.9017 - val_loss: 0.4073 - val_acc: 0.8640\nEpoch 9/10\n20000/20000 [==============================] - 26s 1ms/sample - loss: 0.2424 - acc: 0.9145 - val_loss: 0.4295 - val_acc: 0.8618\nEpoch 10/10\n20000/20000 [==============================] - 26s 1ms/sample - loss: 0.2193 - acc: 0.9233 - val_loss: 0.4482 - val_acc: 0.8706\n"
]
],
[
[
"Here are our training and validation results: validation accuracy is slightly lower than that of the LSTM example we used two sections ago, but runtime is faster, both on CPU and GPU (albeit the exact speedup will vary greatly depending on your exact configuration). \n\nAt that point, we could re-train this model for the right number of epochs (8), and run it on the test set. This is a convincing demonstration that a 1D convnet can offer a fast, cheap alternative to a recurrent network on a word-level sentiment classification task.",
"_____no_output_____"
]
],
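A minimal sketch of that final step — assuming the same data and architecture as above (in practice you would rebuild and recompile the model first so training restarts from scratch):

```python
# Hypothetical follow-up cell: stop at the epoch count suggested by the
# validation curves, then score once on the held-out test set.
model.fit(x_train, y_train, epochs=8, batch_size=128)
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"test accuracy: {test_acc:.3f}")
```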
[
[
"import matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Combining CNNs and RNNs to process long sequences\n\nBecause 1D convnets process input patches independently, **they are not sensitive to the order of the timesteps** (beyond a local scale, the size of the convolution windows), unlike RNNs. Of course, in order to be able to recognize longer-term patterns, one could stack many convolution layers and pooling layers, resulting in upper layers that would \"see\" long chunks of the original inputs -- but that's still a fairly weak way to induce order-sensitivity. \n\nOne way to evidence this weakness is to try 1D convnets on the temperature forecasting problem from the previous notebook, where **order-sensitivity was key to produce good predictions**:",
"_____no_output_____"
]
],
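The order-insensitivity can be shown directly. The following snippet is an editorial illustration, not part of the original notebook: it places the same pattern early versus late in a sequence and shows that a `Conv1D` + global-pooling stack cannot tell the difference.

```python
import numpy as np
from tensorflow.keras import layers, models

net = models.Sequential([
    layers.Conv1D(8, 5, activation="relu", input_shape=(100, 1)),
    layers.GlobalMaxPooling1D(),
])

# Identical "spike" pattern, placed at two different positions
early = np.zeros((1, 100, 1), dtype="float32"); early[0, 10:15, 0] = 1.0
late  = np.zeros((1, 100, 1), dtype="float32"); late[0, 80:85, 0] = 1.0

# Global max pooling discards *where* each filter fired, so the two
# outputs are identical: all position information is lost.
print(np.allclose(net(early), net(late)))  # True
```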
[
[
"import numpy as np\nimport os\n\n# Import data\ndata_dir = './datasets/jena'\nfname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')\n\nf = open(fname)\ndata = f.read()\nf.close()\n\nlines = data.split('\\n')\nheader = lines[0].split(',')\nlines = lines[1:]\n\nprint(header)\nprint()\nprint(len(lines))\n\n\n# Preprocessing\nfloat_data = np.zeros((len(lines), len(header) - 1))\nfor i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n \nmean = float_data[:200000].mean(axis=0)\nfloat_data -= mean\nstd = float_data[:200000].std(axis=0)\nfloat_data /= std\n\n\n# Create datasets\ndef generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):\n if max_index is None:\n max_index = len(data) - delay - 1\n i = min_index + lookback\n while 1:\n if shuffle:\n rows = np.random.randint(min_index + lookback, max_index, size=batch_size)\n else:\n if i + batch_size >= max_index:\n i = min_index + lookback\n rows = np.arange(i, min(i + batch_size, max_index))\n i += len(rows)\n\n samples = np.zeros((len(rows), lookback // step, data.shape[-1]))\n targets = np.zeros((len(rows),))\n \n for j, row in enumerate(rows):\n indices = range(rows[j] - lookback, rows[j], step)\n samples[j] = data[indices]\n targets[j] = data[rows[j] + delay][1]\n \n yield samples, targets\n\n\n\nlookback = 1440\nstep = 6\ndelay = 144\nbatch_size = 128\n\ntrain_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=0,\n max_index=200000,\n shuffle=True,\n step=step, \n batch_size=batch_size\n)\nval_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=200001,\n max_index=300000,\n step=step,\n batch_size=batch_size\n)\ntest_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=300001,\n max_index=None,\n step=step,\n batch_size=batch_size\n)\n\n# This is how many steps to draw from `val_gen` in order to see the whole validation set:\nval_steps = (300000 - 200001 - lookback) // batch_size\n\n# This is how many steps to draw from `test_gen` in order to see the whole test set:\ntest_steps = (len(float_data) - 300001 - lookback) // batch_size",
"['\"Date Time\"', '\"p (mbar)\"', '\"T (degC)\"', '\"Tpot (K)\"', '\"Tdew (degC)\"', '\"rh (%)\"', '\"VPmax (mbar)\"', '\"VPact (mbar)\"', '\"VPdef (mbar)\"', '\"sh (g/kg)\"', '\"H2OC (mmol/mol)\"', '\"rho (g/m**3)\"', '\"wv (m/s)\"', '\"max. wv (m/s)\"', '\"wd (deg)\"']\n\n420551\n"
],
[
"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import RMSprop\n\nmodel = Sequential()\nmodel.add(layers.Conv1D(32, 5, activation='relu', input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.MaxPooling1D(3))\nmodel.add(layers.Conv1D(32, 5, activation='relu'))\nmodel.add(layers.MaxPooling1D(3))\nmodel.add(layers.Conv1D(32, 5, activation='relu'))\nmodel.add(layers.GlobalMaxPooling1D())\nmodel.add(layers.Dense(1))\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\n\nhistory = model.fit(\n train_gen,\n steps_per_epoch=500,\n epochs=20,\n validation_data=val_gen,\n validation_steps=val_steps\n)",
"WARNING:tensorflow:sample_weight modes were coerced from\n ...\n to \n ['...']\nWARNING:tensorflow:sample_weight modes were coerced from\n ...\n to \n ['...']\nTrain for 500 steps, validate for 769 steps\nEpoch 1/20\n500/500 [==============================] - 17s 34ms/step - loss: 0.4162 - val_loss: 0.4447\nEpoch 2/20\n500/500 [==============================] - 16s 32ms/step - loss: 0.3550 - val_loss: 0.4686\nEpoch 3/20\n500/500 [==============================] - 16s 32ms/step - loss: 0.3322 - val_loss: 0.4608\nEpoch 4/20\n500/500 [==============================] - 16s 31ms/step - loss: 0.3159 - val_loss: 0.4595\nEpoch 5/20\n500/500 [==============================] - 16s 32ms/step - loss: 0.3053 - val_loss: 0.4601\nEpoch 6/20\n500/500 [==============================] - 17s 34ms/step - loss: 0.2980 - val_loss: 0.4688\nEpoch 7/20\n500/500 [==============================] - 16s 33ms/step - loss: 0.2894 - val_loss: 0.4623\nEpoch 8/20\n500/500 [==============================] - 16s 31ms/step - loss: 0.2843 - val_loss: 0.4700\nEpoch 9/20\n500/500 [==============================] - 16s 32ms/step - loss: 0.2781 - val_loss: 0.4662\nEpoch 10/20\n500/500 [==============================] - 15s 31ms/step - loss: 0.2748 - val_loss: 0.4677\nEpoch 11/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2692 - val_loss: 0.4713\nEpoch 12/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2656 - val_loss: 0.4706\nEpoch 13/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2624 - val_loss: 0.4755\nEpoch 14/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2576 - val_loss: 0.5165\nEpoch 15/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2569 - val_loss: 0.4926\nEpoch 16/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2547 - val_loss: 0.4885\nEpoch 17/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2498 - val_loss: 0.4754\nEpoch 18/20\n500/500 [==============================] - 15s 31ms/step - loss: 0.2488 - val_loss: 0.5125\nEpoch 19/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2465 - val_loss: 0.4836\nEpoch 20/20\n500/500 [==============================] - 15s 30ms/step - loss: 0.2437 - val_loss: 0.4949\n"
],
[
"import matplotlib.pyplot as plt\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"The validation MAE stays in the low 0.40s: **we cannot even beat our common-sense baseline using the small convnet**. Again, this is because **our convnet looks for patterns anywhere in the input timeseries, and has no knowledge of the temporal position of a pattern it sees** (e.g. towards the beginning, towards the end, etc.). Since more recent datapoints should be interpreted differently from older datapoints in the case of this specific forecasting problem, the convnet fails at producing meaningful results here. **This limitation of convnets was not an issue on IMDB**, because **patterns of keywords that are associated with a positive or a negative sentiment will be informative independently of where they are found in the input sentences**.\n\nOne strategy to combine the speed and lightness of convnets with the order-sensitivity of RNNs is to use a 1D convnet as a preprocessing step before a RNN. **This is especially beneficial when dealing with sequences that are so long that they couldn't realistically be processed with RNNs**, e.g. sequences with thousands of steps. The convnet will turn the long input sequence into much shorter (downsampled) sequences of higher-level features. This sequence of extracted features then becomes the input to the RNN part of the network.\n\nBecause this strategy allows us to manipulate much longer sequences, we could either look at data from further back (by increasing the `lookback` parameter of the data generator), or look at high-resolution timeseries (by decreasing the step parameter of the generator). Here, we will chose (somewhat arbitrarily) to use a `step` twice smaller, resulting in twice longer timeseries, where the weather data is being sampled at a rate of one point per 30 minutes.",
"_____no_output_____"
]
],
[
[
"# This was previously set to 6 (one point per hour). Now 3 (one point per 30 min).\nstep = 3\nlookback = 720 # Unchanged\ndelay = 144 # Unchanged\n\ntrain_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=0,\n max_index=200000,\n shuffle=True,\n step=step\n)\nval_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=200001,\n max_index=300000,\n step=step\n)\ntest_gen = generator(\n float_data,\n lookback=lookback,\n delay=delay,\n min_index=300001,\n max_index=None,\n step=step\n)\n\nval_steps = (300000 - 200001 - lookback) // 128\ntest_steps = (len(float_data) - 300001 - lookback) // 128",
"_____no_output_____"
]
],
[
[
"This is our new model, **starting with two `Conv1D` layers and following-up with a `GRU` layer**:",
"_____no_output_____"
]
],
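Before the code, a rough length calculation (an editorial aside, using the layer sizes of the model below) shows how much the convolutional front-end shortens the sequence the GRU has to process:

```python
# With lookback=720 and step=3, each sample has 720 // 3 = 240 timesteps.
length = 720 // 3        # 240 timesteps enter the network
length = length - 5 + 1  # Conv1D(kernel_size=5), 'valid' padding -> 236
length = length // 3     # MaxPooling1D(pool_size=3)              -> 78
length = length - 5 + 1  # second Conv1D(kernel_size=5)           -> 74
print(length)  # the GRU processes ~74 steps instead of 240
```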
[
[
"model = Sequential()\nmodel.add(layers.Conv1D(32, 5, activation='relu',input_shape=(None, float_data.shape[-1])))\nmodel.add(layers.MaxPooling1D(3))\nmodel.add(layers.Conv1D(32, 5, activation='relu'))\nmodel.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))\nmodel.add(layers.Dense(1))\n\nmodel.summary()\n\nmodel.compile(optimizer=RMSprop(), loss='mae')\n\nhistory = model.fit(\n train_gen,\n steps_per_epoch=500,\n epochs=20,\n validation_data=val_gen,\n validation_steps=val_steps\n)",
"Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv1d_5 (Conv1D) (None, None, 32) 2272 \n_________________________________________________________________\nmax_pooling1d_3 (MaxPooling1 (None, None, 32) 0 \n_________________________________________________________________\nconv1d_6 (Conv1D) (None, None, 32) 5152 \n_________________________________________________________________\ngru (GRU) (None, 32) 6336 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 33 \n=================================================================\nTotal params: 13,793\nTrainable params: 13,793\nNon-trainable params: 0\n_________________________________________________________________\nWARNING:tensorflow:sample_weight modes were coerced from\n ...\n to \n ['...']\nWARNING:tensorflow:sample_weight modes were coerced from\n ...\n to \n ['...']\nTrain for 500 steps, validate for 775 steps\nEpoch 1/20\n500/500 [==============================] - 54s 107ms/step - loss: 0.3395 - val_loss: 0.2979\nEpoch 2/20\n500/500 [==============================] - 51s 103ms/step - loss: 0.3023 - val_loss: 0.2736\nEpoch 3/20\n500/500 [==============================] - 51s 103ms/step - loss: 0.2919 - val_loss: 0.2752\nEpoch 4/20\n500/500 [==============================] - 51s 102ms/step - loss: 0.2837 - val_loss: 0.2801\nEpoch 5/20\n500/500 [==============================] - 51s 101ms/step - loss: 0.2771 - val_loss: 0.2769\nEpoch 6/20\n500/500 [==============================] - 51s 102ms/step - loss: 0.2706 - val_loss: 0.2732\nEpoch 7/20\n500/500 [==============================] - 52s 103ms/step - loss: 0.2678 - val_loss: 0.2833\nEpoch 8/20\n500/500 [==============================] - 51s 101ms/step - loss: 0.2624 - val_loss: 0.2769\nEpoch 9/20\n500/500 [==============================] - 51s 103ms/step - loss: 0.2580 - val_loss: 0.2792\nEpoch 10/20\n500/500 [==============================] - 51s 102ms/step - loss: 0.2543 - val_loss: 0.2731\nEpoch 11/20\n500/500 [==============================] - 55s 110ms/step - loss: 0.2493 - val_loss: 0.2779\nEpoch 12/20\n500/500 [==============================] - 56s 112ms/step - loss: 0.2460 - val_loss: 0.2841\nEpoch 13/20\n500/500 [==============================] - 58s 117ms/step - loss: 0.2425 - val_loss: 0.2876\nEpoch 14/20\n500/500 [==============================] - 54s 109ms/step - loss: 0.2399 - val_loss: 0.2873\nEpoch 15/20\n500/500 [==============================] - 53s 105ms/step - loss: 0.2352 - val_loss: 0.2893\nEpoch 16/20\n500/500 [==============================] - 51s 102ms/step - loss: 0.2330 - val_loss: 0.2866\nEpoch 17/20\n500/500 [==============================] - 53s 107ms/step - loss: 0.2308 - val_loss: 0.2848\nEpoch 18/20\n500/500 [==============================] - 54s 108ms/step - loss: 0.2287 - val_loss: 0.2944\nEpoch 19/20\n500/500 [==============================] - 57s 114ms/step - loss: 0.2252 - val_loss: 0.2974\nEpoch 20/20\n500/500 [==============================] - 61s 122ms/step - loss: 0.2232 - val_loss: 0.2955\n"
],
[
"loss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(loss))\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Judging from the validation loss, **this setup is not quite as good as the regularized GRU alone, but it's significantly faster**. It is looking at twice more data, which in this case doesn't appear to be hugely helpful, but may be important for other datasets.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
d0d5c74a661a438b0cd416f555e98d12fa9aa81d | 3,628 | ipynb | Jupyter Notebook | datasets/original_csv_data/yelp_shortest_review.ipynb | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | datasets/original_csv_data/yelp_shortest_review.ipynb | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | datasets/original_csv_data/yelp_shortest_review.ipynb | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | 22.675 | 129 | 0.457828 | [
[
[
"from pyspark.sql import SparkSession\nspark = SparkSession \\\n .builder \\\n .appName(\"Python Spark create RDD example\") \\\n .config(\"spark.some.config.option\", \"some-value\") \\\n .getOrCreate()",
"_____no_output_____"
],
[
"df = spark.read.format('json').options(header='true', inferschema='true').load(\"../yelp_dataset/review.json\",header=True)",
"_____no_output_____"
],
[
"df.printSchema()",
"root\n |-- business_id: string (nullable = true)\n |-- cool: long (nullable = true)\n |-- date: string (nullable = true)\n |-- funny: long (nullable = true)\n |-- review_id: string (nullable = true)\n |-- stars: double (nullable = true)\n |-- text: string (nullable = true)\n |-- useful: long (nullable = true)\n |-- user_id: string (nullable = true)\n\n"
],
[
"import pyspark.sql.functions as F\ndf=df.withColumn('lenth', F.length('text'))",
"_____no_output_____"
],
[
"df.groupBy('lenth').count().orderBy('lenth').show()",
"+-----+-----+\n|lenth|count|\n+-----+-----+\n| 1| 55|\n| 2| 28|\n| 3| 16|\n| 4| 30|\n| 5| 26|\n| 6| 33|\n| 7| 36|\n| 8| 23|\n| 9| 24|\n| 10| 39|\n| 11| 43|\n| 12| 36|\n| 13| 38|\n| 14| 48|\n| 15| 51|\n| 16| 62|\n| 17| 59|\n| 18| 53|\n| 19| 56|\n| 20| 57|\n+-----+-----+\nonly showing top 20 rows\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0d5ca684bb14f024bf8ce0760aac67d9daced3e | 4,742 | ipynb | Jupyter Notebook | src/24/serial_finder.ipynb | j-carson/advent_2021 | 3e6d7bd7630eade75bf441d66817c019313159e6 | [
"MIT"
] | 1 | 2021-12-29T16:49:30.000Z | 2021-12-29T16:49:30.000Z | src/24/serial_finder.ipynb | j-carson/advent_2021 | 3e6d7bd7630eade75bf441d66817c019313159e6 | [
"MIT"
] | null | null | null | src/24/serial_finder.ipynb | j-carson/advent_2021 | 3e6d7bd7630eade75bf441d66817c019313159e6 | [
"MIT"
] | null | null | null | 50.989247 | 1,309 | 0.636651 | [
[
[
"# Notebook Day 24\n\nWell, I got it to brute force search at ~160,000 ids per second. \nThat's not fast enough to crack a 14-digit number.\n\nThe next step would be to somehow-optimize my programmatically-generated \ncode. But, I'm stuck on that for now.",
"_____no_output_____"
]
],
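A quick back-of-the-envelope check (editorial addition) of why that rate is hopeless for an exhaustive search — each of the 14 serial digits ranges over 1–9, so the space is $9^{14}$:

```python
space = 9 ** 14                         # 22,876,792,454,961 candidates
seconds = space / 160_000               # at ~160,000 ids per second
years = seconds / (60 * 60 * 24 * 365)
print(f"{years:,.0f} years")            # roughly 4,500 years
```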
[
[
"from output import eval_license\nfrom itertools import product\nfrom tqdm import tqdm",
"_____no_output_____"
],
[
"for serial in tqdm(product(range(9,0,-1), repeat=14)):\n result = eval_license(serial)\n if result:\n break",
"46078644it [04:44, 161786.75it/s]\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
d0d5ca85b843986565f93399b8452b9d7c1025e1 | 236,799 | ipynb | Jupyter Notebook | gather/gather_report.ipynb | dib-lab/2021-paper-sourmash-gather-pipeline | a5a7d46d335487baf1e61062c8c9b323ff8d0f89 | [
"BSD-3-Clause"
] | null | null | null | gather/gather_report.ipynb | dib-lab/2021-paper-sourmash-gather-pipeline | a5a7d46d335487baf1e61062c8c9b323ff8d0f89 | [
"BSD-3-Clause"
] | null | null | null | gather/gather_report.ipynb | dib-lab/2021-paper-sourmash-gather-pipeline | a5a7d46d335487baf1e61062c8c9b323ff8d0f89 | [
"BSD-3-Clause"
] | null | null | null | 696.467647 | 128,620 | 0.947605 | [
[
[
"# Figures 2 and 5 for gather paper",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport pylab\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## Preparation: load genome-grist summary CSVs",
"_____no_output_____"
]
],
[
[
"class SampleDFs:\n def __init__(self, name, all_df, left_df, gather_df, names_df):\n self.name = name\n self.all_df = all_df\n self.left_df = left_df\n self.gather_df = gather_df\n self.names_df = names_df\n\ndef load_sample_dfs(name, sample_id, subsample_to=None, debug=False):\n print(f'loading sample {sample_id}')\n # load mapping CSVs\n all_df = pd.read_csv(f'inputs/mapping/{sample_id}.summary.csv')\n left_df = pd.read_csv(f'inputs/leftover/{sample_id}.summary.csv')\n\n # load gather CSV\n gather_df = pd.read_csv(f'inputs/gather/{sample_id}.gather.csv')\n\n # names!\n names_df = pd.read_csv(f'inputs/gather/{sample_id}.genomes.info.csv')\n\n # connect gather_df to all_df and left_df using 'genome_id'\n def fix_name(x):\n return \"_\".join(x.split('_')[:2]).split('.')[0]\n\n gather_df['genome_id'] = gather_df['name'].apply(fix_name)\n names_df['genome_id'] = names_df['ident'].apply(fix_name)\n\n # this ensures that only rows that share genome_id are in all the dataframes\n in_gather = set(gather_df.genome_id)\n if debug:\n print(f'{len(in_gather)} in gather results')\n in_left = set(left_df.genome_id)\n if debug:\n print(f'{len(in_left)} in leftover results')\n\n in_both = in_left.intersection(in_gather)\n if debug:\n print(f'{len(in_both)} in both')\n print('diff gather example:', list(in_gather - in_both)[:5])\n print('diff left example:', list(in_left - in_both)[:5])\n \n assert not in_gather - in_both\n assert not in_left - in_both\n\n all_df = all_df[all_df.genome_id.isin(in_both)]\n left_df = left_df[left_df.genome_id.isin(in_both)]\n gather_df = gather_df[gather_df.genome_id.isin(in_both)]\n names_df = names_df[names_df.genome_id.isin(in_both)]\n\n # reassign index now that we've maybe dropped rows\n all_df.index = range(len(all_df))\n left_df.index = range(len(left_df))\n gather_df.index = range(len(gather_df))\n names_df.index = range(len(names_df))\n\n assert len(all_df) == len(gather_df)\n assert len(left_df) == len(gather_df)\n assert len(names_df) == len(gather_df)\n assert len(names_df) == len(in_both)\n\n #in_left\n\n # re-sort left_df and all_df to match gather_df order, using matching genome_id column\n all_df = all_df.set_index(\"genome_id\")\n all_df = all_df.reindex(index=gather_df[\"genome_id\"])\n all_df = all_df.reset_index()\n\n left_df = left_df.set_index(\"genome_id\")\n left_df = left_df.reindex(index=gather_df[\"genome_id\"])\n left_df = left_df.reset_index()\n\n #left_df[\"mapped_bp\"] = (1 - left_df[\"percent missed\"]/100) * left_df[\"genome bp\"]\n #left_df[\"unique_mapped_coverage\"] = left_df.coverage / (1 - left_df[\"percent missed\"] / 100.0)\n\n names_df = names_df.set_index(\"genome_id\")\n names_df = names_df.reindex(index=gather_df[\"genome_id\"])\n names_df = names_df.reset_index()\n\n # subsample? take top N...\n if subsample_to:\n left_df = left_df[:subsample_to]\n all_df = all_df[:subsample_to]\n gather_df = gather_df[:subsample_to]\n names_df = names_df[:subsample_to]\n\n sample_df = SampleDFs(name, all_df, left_df, gather_df, names_df)\n return sample_df\n\nSUBSAMPLE_TO = 36\npodar_mock = load_sample_dfs('(A) podar mock', 'SRR606249', subsample_to=SUBSAMPLE_TO,)\noil_well = load_sample_dfs('(D) oil well', 'SRR1976948', subsample_to=SUBSAMPLE_TO)\ngut = load_sample_dfs('(C) gut', 'p8808mo11', subsample_to=SUBSAMPLE_TO)\nzymo_mock = load_sample_dfs('(B) zymo mock', 'SRR12324253', subsample_to=SUBSAMPLE_TO)",
"loading sample SRR606249\nloading sample SRR1976948\nloading sample p8808mo11\nloading sample SRR12324253\n"
]
],
[
[
"## Figure 2: K-mer decomposition of a metagenome into constituent genomes. ",
"_____no_output_____"
]
],
[
[
"\nfig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(10, 8), constrained_layout=True)\n\n#pylab.plot(left_df.covered_bp / 1e6, left_df.iloc[::-1].index, 'b.', label='mapped bp to this genome')\nax1.plot(podar_mock.gather_df.intersect_bp / 1e6, podar_mock.gather_df.iloc[::-1].index, 'g<',\n label='total k-mers matched')\nax1.plot(podar_mock.gather_df.unique_intersect_bp / 1e6, podar_mock.gather_df.iloc[::-1].index, 'ro',\n label='remaining k-mers matched')\n\npositions = list(podar_mock.gather_df.index)\nlabels = list(reversed(podar_mock.names_df.display_name))\n\nax1.set_yticks(positions)\nax1.set_yticklabels(labels, fontsize='small')\n\nax1.set_xlabel('millions of k-mers')\nax1.axis(ymin=-1, ymax=SUBSAMPLE_TO)\nax1.legend(loc='lower right')\nax1.grid(True, axis='both')\n\nax2.plot(podar_mock.gather_df.f_match_orig * 100, podar_mock.gather_df.iloc[::-1].index, 'g<', label='total k-mer cover')\nax2.plot(podar_mock.gather_df.f_match * 100, podar_mock.gather_df.iloc[::-1].index, 'ro', label='remaining k-mer cover')\nax2.set_yticks(positions)\nax2.set_yticklabels([])\nax2.set_xlabel('% of genome covered')\nax2.legend(loc='lower left')\nax2.axis(xmin=40, xmax=102)\nax2.axis(ymin=-1, ymax=SUBSAMPLE_TO)\nax2.grid(True)\n\n#fig.tight_layout()\n\nNone\nfig.savefig('fig2.svg')\n",
"_____no_output_____"
]
],
[
[
"## Figure 5: Hash-based k-mer decomposition of a metagenome into constituent genomes compares well to bases covered by read mapping. ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfig, axes = plt.subplots(figsize=(20, 12), nrows=2, ncols=2)\n\nsamples = (podar_mock, zymo_mock, gut, oil_well)\n\nfor n, (ax, sample) in enumerate(zip(axes.flat, samples)):\n ax.plot(sample.left_df.index, sample.left_df.covered_bp / 1e6, 'b*', label='genome bases covered by mapped reads')\n ax.plot(sample.gather_df.index, sample.gather_df.unique_intersect_bp / 1e6, 'ro', label='remaining genome hashes in metagenome')\n\n ax.plot(sample.gather_df.index, (sample.gather_df.unique_intersect_bp - sample.left_df.covered_bp) / 1e6, \n '-', label='difference b/t covered bp and hashes')\n ax.plot(sample.gather_df.index, [0]*len(sample.gather_df), '--')\n\n ax.axis(xmin=-0.5, xmax=len(sample.gather_df.index) - 0.5)\n\n positions = list(sample.gather_df.index)\n labels = [ i + 1 for i in positions ]\n ax.set_xticks(positions)\n ax.set_xticklabels(labels)\n #print(sample.name, positions)\n\n ax.set_xlabel('genome rank (ordered by gather results)')\n ax.set_ylabel('number per genome (million)')\n if n == 0:\n ax.legend(loc='upper right')\n ax.set_title(sample.name)\n \n #ax.label_outer()\n\nfig.tight_layout()\n\npylab.savefig('fig5.svg')\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d5cbd4e4cdfac219b6a5b28dcc1be59709a702 | 8,994 | ipynb | Jupyter Notebook | Face-Recognition-pca-svm/Facial_Recognition(Exercise).ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | 1 | 2020-05-29T20:07:49.000Z | 2020-05-29T20:07:49.000Z | Face-Recognition-pca-svm/Facial_Recognition(Exercise).ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | null | null | null | Face-Recognition-pca-svm/Facial_Recognition(Exercise).ipynb | abhisngh/Data-Science | c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae | [
"MIT"
] | null | null | null | 26.767857 | 263 | 0.489104 | [
[
[
"<a href=\"https://colab.research.google.com/github/samarth0174/Face-Recognition-pca-svm/blob/master/Facial_Recognition(Exercise).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# **In this project we implement the Identification system using Machine Learning concepts such as Principal Component Analysis (PCA) and Support Vector Machine (SVM).**\n## Steps Involved:\n- Importing Libraries\n- Loading the Dataset\n- Data Exploration\n- Splitting the dataset \n- Compute PCA(eigen faces) \n- Train a SVM classification model\n- * Using GridSearch to find best Parameters\n- Model Evaluation\n- Conclusion",
"_____no_output_____"
],
[
"## **Importing Libraries**\n* We need to first import the scikit-learn library for using the PCA function API that is provided into this library.\n* The scikit-learn library also provided an API to fetch **LFW_peoples dataset**. \n* We also required matplotlib to plot faces.",
"_____no_output_____"
]
],
[
[
"#downnlading datasets sklearn\nfrom sklearn.datasets import fetch_lfw_people\n\n#todo import other libraries such sklearn for pca,svc,classification report,plotting",
"_____no_output_____"
]
],
[
[
"## **Loading the dataset**",
"_____no_output_____"
]
],
[
[
"# Download the data, if not already on disk and load it as numpy arrays\nlfw_people = fetch_lfw_people('data', min_faces_per_person=70, resize=0.4)",
"Downloading LFW metadata: https://ndownloader.figshare.com/files/5976012\nDownloading LFW metadata: https://ndownloader.figshare.com/files/5976009\nDownloading LFW metadata: https://ndownloader.figshare.com/files/5976006\nDownloading LFW data (~200MB): https://ndownloader.figshare.com/files/5976015\n"
],
[
"# introspect the images arrays to find the shapes (for plotting)\n#todo:check shape",
"_____no_output_____"
],
[
"# for machine learning we use the data directly (as relative pixel\n# position info is ignored by this model)\n'''todo:assign X for model'''\n\n## the label to predict is the id of the person \n'''Todo:assign y for model ie. the no. of classes''''",
"_____no_output_____"
]
],
[
[
"## **Data Exploration**",
"_____no_output_____"
]
],
[
[
"# plot and explore images and their respective classes\n# hint: use matplotlib",
"_____no_output_____"
]
],
[
[
"## **Splitting the dataset**",
"_____no_output_____"
]
],
[
[
"#use sklearn test-train split",
"_____no_output_____"
]
],
[
[
"## **Compute PCA**\nWe can now compute a PCA (eigenfaces) on the face dataset (treated as unlabeled dataset): unsupervised feature extraction / dimensionality reduction.",
"_____no_output_____"
]
],
[
[
"#Apply the PCA algorithm on the training dataset which computes EigenFaces. \n#Here, take n_components = 150 or 300 means we extract the top 150 (or 300) Eigenfaces from the algorithm. \n#Also print the time taken to apply this algorithm.\n\n# TODO: Create an instance of PCA, initializing with n_components=n_components and whiten=True\n\n#TODO: pass the training dataset (X_train) to pca's 'fit()' method\n",
"_____no_output_____"
]
],
[
[
"## **Train a SVM classification model**\nFit a SVM classifier to the training set.Use GridSearchCV to find a good set of parameters for the classifier.",
"_____no_output_____"
]
],
[
[
"#todo : SVM with Gridsearch algo",
"_____no_output_____"
]
],
[
[
"## **Evaluation of the model quality on the test set**",
"_____no_output_____"
]
],
[
[
"#TODO: Test the model and Generate a classification report",
"_____no_output_____"
]
],
[
[
"# **plot the eigen faces for your visualisation**",
"_____no_output_____"
]
],
[
[
"#TODO:plot most significant eigen faces",
"_____no_output_____"
]
],
[
[
"## **Conclusion**",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d5e14afa878833840a81be734e76ee3a6539fc | 491,766 | ipynb | Jupyter Notebook | S_GAN_image.ipynb | garg-akash/Steganography_GANs | 672b9dd2c3c5e620610eeec83e2fd13b5fd4889c | [
"MIT"
] | 3 | 2022-02-21T08:54:39.000Z | 2022-03-29T08:54:23.000Z | S_GAN_image.ipynb | garg-akash/Steganography_GANs | 672b9dd2c3c5e620610eeec83e2fd13b5fd4889c | [
"MIT"
] | null | null | null | S_GAN_image.ipynb | garg-akash/Steganography_GANs | 672b9dd2c3c5e620610eeec83e2fd13b5fd4889c | [
"MIT"
] | null | null | null | 316.859536 | 184,630 | 0.903092 | [
[
[
"!pip install torch # framework\n!pip install --upgrade reedsolo\n!pip install --upgrade librosa\n!pip install torchvision\n#!pip install torchaudio \n#!pip install tensorboard\n#!pip install soundfile\n!pip install librosa==0.7.1",
"Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.5.1+cu101)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch) (0.16.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch) (1.18.5)\nRequirement already up-to-date: reedsolo in /usr/local/lib/python3.6/dist-packages (1.5.4)\nProcessing /root/.cache/pip/wheels/ee/10/1e/382bb4369e189938d5c02e06d10c651817da8d485bfd1647c9/librosa-0.8.0-cp36-none-any.whl\nRequirement already satisfied, skipping upgrade: pooch>=1.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (1.1.1)\nRequirement already satisfied, skipping upgrade: joblib>=0.14 in /usr/local/lib/python3.6/dist-packages (from librosa) (0.16.0)\nRequirement already satisfied, skipping upgrade: scipy>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (1.4.1)\nRequirement already satisfied, skipping upgrade: soundfile>=0.9.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (0.10.3.post1)\nRequirement already satisfied, skipping upgrade: resampy>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from librosa) (0.2.2)\nRequirement already satisfied, skipping upgrade: decorator>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (4.4.2)\nRequirement already satisfied, skipping upgrade: numpy>=1.15.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (1.18.5)\nRequirement already satisfied, skipping upgrade: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (0.22.2.post1)\nRequirement already satisfied, skipping upgrade: audioread>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (2.1.8)\nRequirement already satisfied, skipping upgrade: numba>=0.43.0 in /usr/local/lib/python3.6/dist-packages (from librosa) (0.48.0)\nRequirement already satisfied, skipping upgrade: requests in /usr/local/lib/python3.6/dist-packages (from pooch>=1.0->librosa) (2.23.0)\nRequirement already satisfied, skipping upgrade: packaging in /usr/local/lib/python3.6/dist-packages (from pooch>=1.0->librosa) (20.4)\nRequirement already satisfied, skipping upgrade: appdirs in /usr/local/lib/python3.6/dist-packages (from pooch>=1.0->librosa) (1.4.4)\nRequirement already satisfied, skipping upgrade: cffi>=1.0 in /usr/local/lib/python3.6/dist-packages (from soundfile>=0.9.0->librosa) (1.14.0)\nRequirement already satisfied, skipping upgrade: six>=1.3 in /usr/local/lib/python3.6/dist-packages (from resampy>=0.2.2->librosa) (1.15.0)\nRequirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from numba>=0.43.0->librosa) (49.1.0)\nRequirement already satisfied, skipping upgrade: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba>=0.43.0->librosa) (0.31.0)\nRequirement already satisfied, skipping upgrade: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->pooch>=1.0->librosa) (2.10)\nRequirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->pooch>=1.0->librosa) (3.0.4)\nRequirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->pooch>=1.0->librosa) (1.24.3)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->pooch>=1.0->librosa) (2020.6.20)\nRequirement already satisfied, skipping upgrade: pyparsing>=2.0.2 in 
/usr/local/lib/python3.6/dist-packages (from packaging->pooch>=1.0->librosa) (2.4.7)\nRequirement already satisfied, skipping upgrade: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi>=1.0->soundfile>=0.9.0->librosa) (2.20)\nInstalling collected packages: librosa\n Found existing installation: librosa 0.7.1\n Uninstalling librosa-0.7.1:\n Successfully uninstalled librosa-0.7.1\nSuccessfully installed librosa-0.8.0\n"
],
[
"from google.colab import drive\ndrive.mount('/content/drive',force_remount=True) \n%cd /content/drive/My\\ Drive/",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n/content/drive/My Drive\n"
],
[
"import numpy as np\nimport librosa\nimport librosa.display \nimport datetime\nimport matplotlib.pyplot as plt\nfrom torch.nn.functional import binary_cross_entropy_with_logits, mse_loss\nfrom torchvision import datasets, transforms\nfrom IPython.display import clear_output\nimport torchvision\nfrom torchvision.datasets.vision import VisionDataset\nfrom torch.optim import Adam\nfrom tqdm import notebook\nimport torch\nimport os.path\nimport os\nimport gc\nimport sys\nfrom PIL import ImageFile, Image\n#from torchaudio import transforms as audiotransforms\n#import torchaudio\n#import soundfile \n#from IPython.display import Audio\nimport random\n\n\nImageFile.LOAD_TRUNCATED_IMAGES = True",
"_____no_output_____"
],
[
"epochs = 64\ndata_depth = 4\nhidden_size = 32\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nLOAD_MODEL=True\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.041_2020-07-25_15:31:19.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.003_2020-07-24_20:01:33.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.022_2020-07-24_05:11:17.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.041_2020-07-23_23:01:25.dat'\nPATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.042_2020-07-23_02:08:27.dat' ##Depth4Epoch64\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.005_2020-07-22_20:05:49.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.019_2020-07-22_15:02:29.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.020_2020-07-22_13:43:02.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_+0.048_2020-07-22_12:21:23.dat'\n#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_+0.017_2020-07-22_08:18:00.dat'",
"_____no_output_____"
],
[
"import torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nfrom math import exp\n\n# -*- coding: utf-8 -*-\n\nimport zlib\nfrom math import exp\n\nimport torch\nfrom reedsolo import RSCodec\nfrom torch.nn.functional import conv2d\n\nrs = RSCodec(250)\n\n\ndef text_to_bits(text):\n \"\"\"Convert text to a list of ints in {0, 1}\"\"\"\n return bytearray_to_bits(text_to_bytearray(text))\n\n\ndef bits_to_text(bits):\n \"\"\"Convert a list of ints in {0, 1} to text\"\"\"\n return bytearray_to_text(bits_to_bytearray(bits))\n\n\ndef bytearray_to_bits(x):\n \"\"\"Convert bytearray to a list of bits\"\"\"\n result = []\n for i in x:\n bits = bin(i)[2:]\n bits = '00000000'[len(bits):] + bits\n result.extend([int(b) for b in bits])\n\n return result\n\n\ndef bits_to_bytearray(bits):\n \"\"\"Convert a list of bits to a bytearray\"\"\"\n ints = []\n for b in range(len(bits) // 8):\n byte = bits[b * 8:(b + 1) * 8]\n ints.append(int(''.join([str(bit) for bit in byte]), 2))\n\n return bytearray(ints)\n\n\ndef text_to_bytearray(text):\n \"\"\"Compress and add error correction\"\"\"\n assert isinstance(text, str), \"expected a string\"\n x = zlib.compress(text.encode(\"utf-8\"))\n x = rs.encode(bytearray(x))\n return x\n\ndef bytearray_to_text(x):\n \"\"\"Apply error correction and decompress\"\"\"\n try:\n #print('1: ',x)\n text = rs.decode(x)[0]\n #print('2: ',x)\n text = zlib.decompress(text)\n #print('3: ',x)\n return text.decode(\"utf-8\")\n except BaseException as e: \n print(e)\n return False\n\ndef gaussian(window_size, sigma):\n gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()\n\ndef create_window(window_size, channel):\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\n window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())\n return window\n\ndef _ssim(img1, img2, window, window_size, channel, size_average = True):\n mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)\n mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)\n\n mu1_sq = mu1.pow(2)\n mu2_sq = mu2.pow(2)\n mu1_mu2 = mu1*mu2\n\n sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq\n sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq\n sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2\n\n C1 = 0.01**2\n C2 = 0.03**2\n\n ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\n\n if size_average:\n return ssim_map.mean()\n else:\n return ssim_map.mean(1).mean(1).mean(1)\n\nclass SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2):\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n self.window = window\n self.channel = channel\n\n\n return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n\ndef 
ssim(img1, img2, window_size = 11, size_average = True):\n (_, channel, _, _) = img1.size()\n window = create_window(window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n return _ssim(img1, img2, window, window_size, channel, size_average)\n",
"_____no_output_____"
],
[
"import torch\nfrom torch import nn\nimport numpy\n\nclass BasicEncoder(nn.Module):\n \"\"\"\n The BasicEncoder module takes an cover image and a data tensor and combines\n them into a steganographic image.\n\n \"\"\"\n def _name(self):\n return \"BasicEncoder\"\n\n def _conv2d(self, in_channels, out_channels):\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n padding=1\n )\n\n def _build_models(self):\n self.conv1 = nn.Sequential(\n self._conv2d(3, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv2 = nn.Sequential(\n self._conv2d(self.hidden_size + self.data_depth, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv3 = nn.Sequential(\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv4 = nn.Sequential(\n self._conv2d(self.hidden_size, 3),\n )\n return self.conv1, self.conv2, self.conv3, self.conv4\n\n def __init__(self, data_depth, hidden_size):\n super().__init__()\n self.data_depth = data_depth\n self.hidden_size = hidden_size\n self._models = self._build_models()\n self.name = self._name()\n\n def forward(self, image, data):\n x = self._models[0](image)\n x_1 = self._models[1](torch.cat([x] + [data], dim=1))\n x_2 = self._models[2](x_1)\n x_3 = self._models[3](x_2)\n return x_3\n\n\nclass ResidualEncoder(BasicEncoder):\n def _name(self):\n return \"ResidualEncoder\"\n\n def forward(self, image, data):\n return image + super().forward(self, image, data)\n\n\nclass DenseEncoder(BasicEncoder):\n def _name(self):\n return \"DenseEncoder\"\n\n def _build_models(self):\n self.conv1 = super()._build_models()[0]\n self.conv2 = super()._build_models()[1]\n self.conv3 = nn.Sequential(\n self._conv2d(self.hidden_size * 2 +\n self.data_depth, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv4 = nn.Sequential(\n self._conv2d(self.hidden_size * 3 + self.data_depth, 3)\n )\n\n return self.conv1, self.conv2, self.conv3, self.conv4\n\n def forward(self, image, data):\n x = self._models[0](image)\n x_list = [x]\n x_1 = self._models[1](torch.cat(x_list+[data], dim=1))\n x_list.append(x_1)\n x_2 = self._models[2](torch.cat(x_list+[data], dim=1))\n x_list.append(x_2)\n x_3 = self._models[3](torch.cat(x_list+[data], dim=1))\n x_list.append(x_3)\n return image + x_3\n",
"_____no_output_____"
],
[
"import torch\nfrom torch import nn\n#from torch.nn import Sigmoid\n#from torch.distributions import Bernoulli\n\n\nclass BasicDecoder(nn.Module):\n \"\"\"\n The BasicDecoder module takes an steganographic image and attempts to decode\n the embedded data tensor.\n\n Input: (N, 3, H, W)\n Output: (N, D, H, W)\n \"\"\"\n def _name(self):\n return \"BasicDecoder\"\n\n def _conv2d(self, in_channels, out_channels):\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n padding=1\n )\n\n def _build_models(self):\n self.conv1 = nn.Sequential(\n self._conv2d(3, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv2 = nn.Sequential(\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv3 = nn.Sequential(\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv4 = nn.Sequential(\n self._conv2d(self.hidden_size, self.data_depth),\n #nn.Sigmoid(),\n )\n\n return self.conv1, self.conv2, self.conv3, self.conv4\n\n def forward(self, image):\n x = self._models[0](image)\n x_1 = self._models[1](x)\n x_2 = self._models[2](x_1)\n x_3 = self._models[3](x_2)\n #x_4 = Bernoulli(x_3).sample()\n return x_3\n\n def __init__(self, data_depth, hidden_size):\n super().__init__()\n self.data_depth = data_depth\n self.hidden_size = hidden_size\n self._models = self._build_models()\n self.name = self._name()\n\n\nclass DenseDecoder(BasicDecoder):\n def _name(self):\n return \"DenseDecoder\"\n\n def _build_models(self):\n self.conv1 = super()._build_models()[0]\n self.conv2 = super()._build_models()[1]\n self.conv3 = nn.Sequential(\n self._conv2d(self.hidden_size * 2, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size)\n )\n self.conv4 = nn.Sequential(\n self._conv2d(self.hidden_size * 3, self.data_depth),\n #nn.Sigmoid(),\n )\n\n return self.conv1, self.conv2, self.conv3, self.conv4\n\n def forward(self, image):\n x = self._models[0](image)\n x_list = [x]\n x_1 = self._models[1](torch.cat(x_list, dim=1))\n x_list.append(x_1)\n x_2 = self._models[2](torch.cat(x_list, dim=1))\n x_list.append(x_2)\n x_3 = self._models[3](torch.cat(x_list, dim=1))\n x_list.append(x_3)\n return x_3\n",
"_____no_output_____"
],
[
"import torch\nfrom torch import nn\n\n\nclass BasicCritic(nn.Module):\n \"\"\"\n The BasicCritic module takes an image and predicts whether it is a cover\n image or a steganographic image (N, 1).\n\n Input: (N, 3, H, W)\n Output: (N, 1)\n \"\"\"\n def _name(self):\n return \"BasicCritic\"\n\n def _conv2d(self, in_channels, out_channels):\n return nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3\n )\n\n def _build_models(self):\n\n self.conv1 = nn.Sequential(\n self._conv2d(3, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv2 = nn.Sequential(\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n )\n self.conv3 = nn.Sequential(\n self._conv2d(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(inplace=True),\n nn.BatchNorm2d(self.hidden_size),\n ) \n self.conv4 = nn.Sequential(\n self._conv2d(self.hidden_size, 1)\n ) \n\n return self.conv1,self.conv2,self.conv3,self.conv4\n\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n self._models = self._build_models()\n self.name = self._name()\n\n def forward(self, image):\n x = self._models[0](image)\n x_1 = self._models[1](x)\n x_2 = self._models[2](x_1)\n x_3 = self._models[3](x_2)\n return torch.mean(x_3.view(x_3.size(0), -1), dim=1)\n ",
"_____no_output_____"
],
[
"def plot(name, train_epoch, values, path, save):\n clear_output(wait=True)\n plt.close('all')\n fig = plt.figure()\n fig = plt.ion()\n fig = plt.subplot(1, 1, 1)\n fig = plt.title('epoch: %s -> %s: %s' % (train_epoch, name, values[-1]))\n fig = plt.ylabel(name)\n fig = plt.xlabel('validation_set')\n fig = plt.plot(values)\n fig = plt.grid()\n get_fig = plt.gcf()\n fig = plt.draw() # draw the plot\n fig = plt.pause(1) # show it for 1 second\n if save:\n now = datetime.datetime.now()\n get_fig.savefig('%s/%s_%.3f_%d_%s.png' %\n (path, name, train_epoch, values[-1], now.strftime(\"%Y-%m-%d_%H:%M:%S\")))",
"_____no_output_____"
],
[
"def test(encoder,decoder,data_depth,train_epoch,cover,payload):\n %matplotlib inline\n generated = encoder.forward(cover, payload)\n decoded = decoder.forward(generated)\n decoder_loss = binary_cross_entropy_with_logits(decoded, payload)\n decoder_acc = (decoded >= 0.0).eq(\n payload >= 0.5).sum().float() / payload.numel() # .numel() calculate the number of element in a tensor\n print(\"Decoder loss: %.3f\"% decoder_loss.item())\n print(\"Decoder acc: %.3f\"% decoder_acc.item())\n f, ax = plt.subplots(1, 2)\n plt.title(\"%s_%s\"%(encoder.name,decoder.name))\n cover=np.transpose(np.squeeze(cover.cpu()), (1, 2, 0))\n ax[0].imshow(cover)\n ax[0].axis('off')\n print(generated.shape)\n generated_=np.transpose(np.squeeze((generated.cpu()).detach().numpy()), (1, 2, 0))\n ax[1].imshow(generated_)\n ax[1].axis('off')\n #now = datetime.datetime.now()\n #print(\"payload :\")\n #print(payload)\n #print(\"decoded :\")\n #decoded[decoded<0]=0\n #decoded[decoded>0]=1\n #print(decoded)\n\n # plt.savefig('results/samples/%s_%s_%d_%.3f_%d_%s.png' %\n # (encoder.name,decoder.name, data_depth,decoder_acc, train_epoch, now.strftime(\"%Y-%m-%d_%H:%M:%S\")))\n return generated",
"_____no_output_____"
],
[
"def save_model(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,ep):\n now = datetime.datetime.now()\n cover_score = metrics['val.cover_score'][-1]\n name = \"%s_%s_%+.3f_%s.dat\" % (encoder.name,decoder.name,cover_score,\n now.strftime(\"%Y-%m-%d_%H:%M:%S\"))\n fname = os.path.join('.', 'myresults/model', name)\n states = {\n 'state_dict_critic': critic.state_dict(),\n 'state_dict_encoder': encoder.state_dict(),\n 'state_dict_decoder': decoder.state_dict(),\n 'en_de_optimizer': en_de_optimizer.state_dict(),\n 'cr_optimizer': cr_optimizer.state_dict(),\n 'metrics': metrics,\n 'train_epoch': ep,\n 'date': now.strftime(\"%Y-%m-%d_%H:%M:%S\"),\n }\n torch.save(states, fname)\n path='myresults/plots/train_%s_%s_%s'% (encoder.name,decoder.name,now.strftime(\"%Y-%m-%d_%H:%M:%S\"))\n try:\n os.mkdir(os.path.join('.', path))\n except Exception as error:\n print(error)\n\n plot('encoder_mse', ep, metrics['val.encoder_mse'], path, True)\n plot('decoder_loss', ep, metrics['val.decoder_loss'], path, True)\n plot('decoder_acc', ep, metrics['val.decoder_acc'], path, True)\n plot('cover_score', ep, metrics['val.cover_score'], path, True)\n plot('generated_score', ep, metrics['val.generated_score'], path, True)\n plot('ssim', ep, metrics['val.ssim'], path, True)\n plot('psnr', ep, metrics['val.psnr'], path, True)\n plot('bpp', ep, metrics['val.bpp'], path, True)",
"_____no_output_____"
],
[
"def fit_gan(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,train_loader,valid_loader):\n for ep in range(epochs):\n print(\"Epoch %d\" %(ep+1))\n for cover, _ in notebook.tqdm(train_loader):\n gc.collect()\n cover = cover.to(device)\n N, _, H, W = cover.size()\n # sampled from the discrete uniform distribution over 0 to 2\n payload = torch.zeros((N, data_depth, H, W),\n device=device).random_(0, 2)\n generated = encoder.forward(cover, payload)\n cover_score = torch.mean(critic.forward(cover))\n generated_score = torch.mean(critic.forward(generated))\n\n cr_optimizer.zero_grad()\n (cover_score - generated_score).backward(retain_graph=False)\n cr_optimizer.step()\n\n for p in critic.parameters():\n p.data.clamp_(-0.1, 0.1)\n metrics['train.cover_score'].append(cover_score.item())\n metrics['train.generated_score'].append(generated_score.item())\n\n for cover, _ in notebook.tqdm(train_loader):\n gc.collect()\n cover = cover.to(device)\n N, _, H, W = cover.size()\n # sampled from the discrete uniform distribution over 0 to 2\n payload = torch.zeros((N, data_depth, H, W),\n device=device).random_(0, 2)\n generated = encoder.forward(cover, payload)\n decoded = decoder.forward(generated)\n encoder_mse = mse_loss(generated, cover)\n decoder_loss = binary_cross_entropy_with_logits(decoded, payload)\n decoder_acc = (decoded >= 0.0).eq(\n payload >= 0.5).sum().float() / payload.numel()\n generated_score = torch.mean(critic.forward(generated))\n\n en_de_optimizer.zero_grad()\n (100 * encoder_mse + decoder_loss +\n generated_score).backward() # Why 100?\n en_de_optimizer.step()\n\n metrics['train.encoder_mse'].append(encoder_mse.item())\n metrics['train.decoder_loss'].append(decoder_loss.item())\n metrics['train.decoder_acc'].append(decoder_acc.item())\n\n for cover, _ in notebook.tqdm(valid_loader):\n gc.collect()\n cover = cover.to(device)\n N, _, H, W = cover.size()\n # sampled from the discrete uniform distribution over 0 to 2\n payload = torch.zeros((N, data_depth, H, W),\n device=device).random_(0, 2)\n generated = encoder.forward(cover, payload)\n decoded = decoder.forward(generated)\n\n encoder_mse = mse_loss(generated, cover)\n decoder_loss = binary_cross_entropy_with_logits(decoded, payload)\n decoder_acc = (decoded >= 0.0).eq(\n payload >= 0.5).sum().float() / payload.numel()\n generated_score = torch.mean(critic.forward(generated))\n cover_score = torch.mean(critic.forward(cover))\n\n metrics['val.encoder_mse'].append(encoder_mse.item())\n metrics['val.decoder_loss'].append(decoder_loss.item())\n metrics['val.decoder_acc'].append(decoder_acc.item())\n metrics['val.cover_score'].append(cover_score.item())\n metrics['val.generated_score'].append(generated_score.item())\n metrics['val.ssim'].append(\n ssim(cover, generated).item())\n metrics['val.psnr'].append(\n 10 * torch.log10(4 / encoder_mse).item())\n metrics['val.bpp'].append(\n data_depth * (2 * decoder_acc.item() - 1))\n print('encoder_mse: %.3f - decoder_loss: %.3f - decoder_acc: %.3f - cover_score: %.3f - generated_score: %.3f - ssim: %.3f - psnr: %.3f - bpp: %.3f'\n %(encoder_mse.item(),decoder_loss.item(),decoder_acc.item(),cover_score.item(),generated_score.item(), ssim(cover, generated).item(),10 * torch.log10(4 / encoder_mse).item(),data_depth * (2 * decoder_acc.item() - 1)))\n save_model(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,ep)",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n for func in [\n lambda: os.mkdir(os.path.join('.', 'results')),\n lambda: os.mkdir(os.path.join('.', 'results/model')),\n lambda: os.mkdir(os.path.join('.', 'results/plots'))]: # create directories\n try:\n func()\n except Exception as error:\n print(error)\n continue\n\n METRIC_FIELDS = [\n 'val.encoder_mse',\n 'val.decoder_loss',\n 'val.decoder_acc',\n 'val.cover_score',\n 'val.generated_score',\n 'val.ssim',\n 'val.psnr',\n 'val.bpp',\n 'train.encoder_mse',\n 'train.decoder_loss',\n 'train.decoder_acc',\n 'train.cover_score',\n 'train.generated_score',\n ]\n\n print('image')\n data_dir = 'div2k'\n mu = [.5, .5, .5]\n sigma = [.5, .5, .5]\n transform = transforms.Compose([transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(\n 360, pad_if_needed=True),\n transforms.ToTensor(),\n transforms.Normalize(mu, sigma)])\n train_set = datasets.ImageFolder(os.path.join(\n data_dir, \"train/\"), transform=transform)\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=4, shuffle=True)\n valid_set = datasets.ImageFolder(os.path.join( \n data_dir, \"val/\"), transform=transform)\n valid_loader = torch.utils.data.DataLoader(\n valid_set, batch_size=4, shuffle=False)\n\n encoder = DenseEncoder(data_depth, hidden_size).to(device)\n decoder = DenseDecoder(data_depth, hidden_size).to(device)\n critic = BasicCritic(hidden_size).to(device)\n cr_optimizer = Adam(critic.parameters(), lr=1e-4)\n en_de_optimizer = Adam(list(decoder.parameters()) + list(encoder.parameters()), lr=1e-4)\n metrics = {field: list() for field in METRIC_FIELDS}\n\n if LOAD_MODEL: \n if torch.cuda.is_available():\n checkpoint = torch.load(PATH)\n else:\n checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)\n \n critic.load_state_dict(checkpoint['state_dict_critic'])\n encoder.load_state_dict(checkpoint['state_dict_encoder'])\n decoder.load_state_dict(checkpoint['state_dict_decoder'])\n en_de_optimizer.load_state_dict(checkpoint['en_de_optimizer'])\n cr_optimizer.load_state_dict(checkpoint['cr_optimizer'])\n metrics=checkpoint['metrics']\n ep=checkpoint['train_epoch']\n date=checkpoint['date']\n critic.train(mode=False)\n encoder.train(mode=False)\n decoder.train(mode=False)\n print('GAN loaded: ', ep)\n print(critic)\n print(encoder)\n print(decoder)\n print(en_de_optimizer)\n print(cr_optimizer)\n print(date)\n else:\n fit_gan(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,train_loader,valid_loader)\n ",
"[Errno 17] File exists: './results'\n[Errno 17] File exists: './results/model'\n[Errno 17] File exists: './results/plots'\nimage\nGAN loaded: 63\nBasicCritic(\n (conv1): Sequential(\n (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv3): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv4): Sequential(\n (0): Conv2d(32, 1, kernel_size=(3, 3), stride=(1, 1))\n )\n)\nDenseEncoder(\n (conv1): Sequential(\n (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv2): Sequential(\n (0): Conv2d(36, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv3): Sequential(\n (0): Conv2d(68, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv4): Sequential(\n (0): Conv2d(100, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n)\nDenseDecoder(\n (conv1): Sequential(\n (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv3): Sequential(\n (0): Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01, inplace=True)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv4): Sequential(\n (0): Conv2d(96, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n)\nAdam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)\nAdam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)\n2020-07-23_02:08:27\n"
],
[
"from collections import Counter\ndef make_payload(width, height, depth, text):\n \"\"\"\n This takes a piece of text and encodes it into a bit vector. It then\n fills a matrix of size (width, height) with copies of the bit vector.\n \"\"\"\n message = text_to_bits(text) + [0] * 32\n\n payload = message\n while len(payload) < width * height * depth:\n payload += message\n\n payload = payload[:width * height * depth]\n\n return torch.FloatTensor(payload).view(1, depth, height, width)\n\ndef make_message(image):\n #image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)\n image = image.to(device)\n\n image = decoder(image).view(-1) > 0\n image=torch.tensor(image, dtype=torch.uint8)\n\n # split and decode messages\n candidates = Counter()\n bits = image.data.cpu().numpy().tolist()\n for candidate in bits_to_bytearray(bits).split(b'\\x00\\x00\\x00\\x00'):\n candidate = bytearray_to_text(bytearray(candidate))\n if candidate:\n #print(candidate)\n candidates[candidate] += 1\n\n # choose most common message\n if len(candidates) == 0:\n raise ValueError('Failed to find message.')\n\n candidate, count = candidates.most_common(1)[0]\n return candidate\n",
"_____no_output_____"
]
],
[
[
"###Check a sample from validation dataset",
"_____no_output_____"
]
],
[
[
"# to see one image\ncover,*rest = next(iter(valid_set))\n_, H, W = cover.size()\ncover = cover[None].to(device)\ntext = \"We are busy in Neural Networks project. Anyhow, how is your day going?\"\npayload = make_payload(W, H, data_depth, text)\npayload = payload.to(device)\n#generated = encoder.forward(cover, payload)\ngenerated = test(encoder,decoder,data_depth,epochs,cover,payload)\ntext_return = make_message(generated)\nprint(text_return)",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\nClipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"
]
],
[
[
"###Testing begins (from a loaded model)",
"_____no_output_____"
],
[
"####Test1 - Save steganographic images",
"_____no_output_____"
]
],
[
[
"##Take all images from test folder (one by one) and message requested by user to encode\n\nfrom imageio import imread, imwrite\n \nepochs = 64\ndata_depth = 4 \ntest_folder = \"div2k/myval/_\"\nsave_dir = os.mkdir(os.path.join(\"div2k/myval\",str(data_depth)+\"_\"+str(epochs)))\n\nfor filename in os.listdir(test_folder):\n print(os.path.join(test_folder,filename))\n\n cover_im = imread(os.path.join(test_folder,filename), pilmode='RGB') / 127.5 - 1.0\n\n cover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)\n cover_size = cover.size()\n # _, _, height, width = cover.size()\n text = \"We are busy in Neural Networks project. The deadline is near. Anyhow, how is your day going?\"\n payload = make_payload(cover_size[3], cover_size[2], data_depth, text)\n\n cover = cover.to(device)\n payload = payload.to(device)\n generated = encoder.forward(cover, payload)[0].clamp(-1.0, 1.0)\n #print(generated.size())\n generated = (generated.permute(2, 1, 0).detach().cpu().numpy() + 1.0) * 127.5\n\n imwrite(os.path.join(\"div2k/myval/\",str(data_depth)+\"_\"+str(epochs),(str(data_depth)+\"_\"+str(epochs)+\"_\"+filename)), generated.astype('uint8'))",
"div2k/myval/_/0805.png\ndiv2k/myval/_/0804.png\ndiv2k/myval/_/0833.png\ndiv2k/myval/_/0855.png\ndiv2k/myval/_/0874.png\ndiv2k/myval/_/0894.png\n"
]
],
[
[
"####Test2 - Take a steganographic image from a folder and decode",
"_____no_output_____"
]
],
[
[
"##[Individual]Take an image requested by user to decode\n\nfrom imageio import imread, imwrite\n\nsteg_folder = \"div2k/myval/4_64\"\nfilename = \"4_64_0855.png\"\nimage = imread(os.path.join(steg_folder,filename), pilmode='RGB') / 127.5 - 1.0\nplt.imshow(image)\nimage = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)\ntext_return = make_message(image)\nprint(text_return)\n#f = open(steg_folder+\".csv\", \"a\")\n#f.write(\"\\n\" + filename + \"\\t\" + str(text_return))",
"WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n"
]
],
[
[
"####Test3 - Encode to decode in one cell",
"_____no_output_____"
]
],
[
[
"##Input to outut (both encode decode in one cell)\nfrom imageio import imread, imwrite\n\ncover_im = imread(\"div2k/myval/_/0805.png\", pilmode='RGB') / 127.5 - 1.0\nplt.imshow(cover_im)\ncover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)\ncover_size = cover.size()\n# _, _, height, width = cover.size()\ntext = \"We are busy in Neural Networks project. Anyhow, how is your day going?\"\npayload = make_payload(cover_size[3], cover_size[2], data_depth, text)\n\ncover = cover.to(device)\npayload = payload.to(device)\ngenerated = encoder.forward(cover, payload)\ntext_return = make_message(generated)\nprint(text_return)",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n"
]
],
[
[
"####Generate Difference Image",
"_____no_output_____"
]
],
[
[
"from skimage.metrics import structural_similarity as ssim\nfrom imageio import imread, imwrite\n\ndiff_epochs = 64\ndiff_data_depth = 4\ncover_folder = \"div2k/myval/_\"\nsteg_folder = \"div2k/myval/\"+str(diff_data_depth)+\"_\"+str(diff_epochs)\n\nfor filename in os.listdir(cover_folder):\n print(os.path.join(cover_folder,filename))\n\n cover = imread(os.path.join(cover_folder,filename), as_gray=True) \n gen = imread(os.path.join(steg_folder,str(diff_data_depth)+\"_\"+str(diff_epochs)+\"_\"+filename), as_gray=True)\n\n (score, diff) = ssim(cover, gen, full=True)\n imwrite(\"div2k/myval/\"+str(diff_data_depth)+\"_\"+str(diff_epochs)+\"/\"+\"%d_%d_diff_%s\"%(diff_data_depth,diff_epochs,filename),diff)\n print(\"Score: \",score)",
"div2k/myval/_/0805.png\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d5f02a69cf330ae96f040c28a290110303ed2e | 209,186 | ipynb | Jupyter Notebook | controller-verification.ipynb | google/nsc-tutorial | cf12f837ed8072c55b4c3c802ab8720fe04e2061 | [
"MIT"
] | 3 | 2021-10-21T13:24:55.000Z | 2022-02-26T21:47:03.000Z | controller-verification.ipynb | google/nsc-tutorial | cf12f837ed8072c55b4c3c802ab8720fe04e2061 | [
"MIT"
] | null | null | null | controller-verification.ipynb | google/nsc-tutorial | cf12f837ed8072c55b4c3c802ab8720fe04e2061 | [
"MIT"
] | 2 | 2021-10-16T09:20:06.000Z | 2022-02-26T21:24:53.000Z | 85.837505 | 200 | 0.746001 | [
[
[
"# **[Adversarial Disturbances for Controller Verification](http://proceedings.mlr.press/v144/ghai21a/ghai21a.pdf)**\n\n[](https://colab.research.google.com/github/google/nsc-tutorial/blob/main/controller-verification.ipynb) ",
"_____no_output_____"
],
[
"## Housekeeping\nImports [jax](https://github.com/google/jax), numpy, scipy, plotting utils...",
"_____no_output_____"
]
],
[
[
"#@title\n\nimport jax\nimport itertools\nimport numpy as onp\nimport jax.numpy as np\nimport matplotlib.pyplot as plt\nimport ipywidgets as widgets\n\nfrom jax.numpy.linalg import inv, pinv\nfrom scipy.linalg import solve_discrete_are as dare\nfrom jax import jit, grad, hessian\nfrom IPython import display\nfrom toolz.dicttoolz import valmap, itemmap\nfrom itertools import chain\n\ndef liveplot(costss, xss, wss, cmax=30, cumcmax=15, wmax=2, xmax=20, logcmax=100, logcumcmax=1000):\n cummean = lambda x: np.cumsum(np.array(x))/np.arange(1, len(x)+1)\n cumcostss = valmap(cummean, costss)\n disturbances = valmap(lambda x: list(map(lambda w: w[0], x)), wss)\n\n plt.style.use('seaborn')\n colors = {\n \"Zero Control\": \"gray\",\n \"LQR / H2\": \"green\",\n \"Finite-horizon LQR / H2\": \"teal\",\n \"Optimal LQG for GRW\": \"aqua\",\n \"Robust / Hinf Control\": \"orange\",\n \"GPC\": \"red\"\n }\n\n fig, ax = plt.subplots(3, 2, figsize=(21, 12))\n\n costssline = {}\n for Cstr, costs in costss.items():\n costssline[Cstr], = ax[0, 0].plot([], label=Cstr, color=colors[Cstr])\n ax[0, 0].set_xlabel(\"Time\")\n ax[0, 0].set_ylabel(\"Instantaneous Cost\")\n ax[0, 0].set_ylim([-1, cmax])\n ax[0, 0].set_xlim([0, 100])\n ax[0, 0].legend()\n\n cumcostssline = {}\n for Cstr, costs in cumcostss.items():\n cumcostssline[Cstr], = ax[0, 1].plot([], label=Cstr, color=colors[Cstr])\n ax[0, 1].set_xlabel(\"Time\")\n ax[0, 1].set_ylabel(\"Average Cost\")\n ax[0, 1].set_ylim([-1, cumcmax])\n ax[0, 1].set_xlim([0, 100])\n ax[0, 1].legend()\n\n perturblines = {}\n for Cstr, W in disturbances.items():\n perturblines[Cstr], = ax[1, 0].plot([], label=Cstr, color=colors[Cstr])\n ax[1, 0].set_xlabel(\"Time\")\n ax[1, 0].set_ylabel(\"Generated Disturbances\")\n ax[1, 0].set_ylim([-wmax, wmax])\n ax[1, 0].set_xlim([0, 100])\n ax[1, 0].legend()\n\n pointssline, trailssline = {}, {}\n for Cstr, C in xss.items():\n pointssline[Cstr], = ax[1,1].plot([], [], label=Cstr, color=colors[Cstr], ms=20, marker='s')\n trailssline[Cstr], = ax[1,1].plot([], [], label=Cstr, color=colors[Cstr], lw=2)\n ax[1, 1].set_xlabel(\"Position\")\n ax[1, 1].set_ylabel(\"\")\n ax[1, 1].set_ylim([-1, 6])\n ax[1, 1].set_xlim([-xmax, xmax])\n ax[1, 1].legend()\n\n logcostssline = {}\n for Cstr, costs in costss.items():\n logcostssline[Cstr], = ax[2, 0].plot([1], label=Cstr, color=colors[Cstr])\n ax[2, 0].set_xlabel(\"Time\")\n ax[2, 0].set_ylabel(\"Instantaneous Cost (Log Scale)\")\n ax[2, 0].set_xlim([0, 100])\n ax[2, 0].set_ylim([0.1, logcmax])\n ax[2, 0].set_yscale('log')\n ax[2, 0].legend()\n\n logcumcostssline = {}\n for Cstr, costs in cumcostss.items():\n logcumcostssline[Cstr], = ax[2, 1].plot([1], label=Cstr, color=colors[Cstr])\n ax[2, 1].set_xlabel(\"Time\")\n ax[2, 1].set_ylabel(\"Average Cost (Log Scale)\")\n ax[2, 1].set_xlim([0, 100])\n ax[2, 1].set_ylim([0.1, logcumcmax])\n ax[2, 1].set_yscale('log')\n ax[2, 1].legend()\n\n def livedraw(t):\n for Cstr, costsline in costssline.items():\n costsline.set_data(np.arange(t), costss[Cstr][:t])\n for Cstr, cumcostsline in cumcostssline.items():\n cumcostsline.set_data(np.arange(t), cumcostss[Cstr][:t])\n for i, (Cstr, pointsline) in enumerate(pointssline.items()):\n pointsline.set_data(xss[Cstr][t][0], i)\n for Cstr, perturbline in perturblines.items():\n perturbline.set_data(np.arange(t), disturbances[Cstr][:t])\n for i, (Cstr, trailsline) in enumerate(trailssline.items()):\n trailsline.set_data(list(map(lambda x: x[0], xss[Cstr][max(t-10, 0):t])), i)\n for Cstr, logcostsline in 
logcostssline.items():\n logcostsline.set_data(np.arange(t), costss[Cstr][:t])\n for Cstr, logcumcostsline in logcumcostssline.items():\n logcumcostsline.set_data(np.arange(t), cumcostss[Cstr][:t])\n return chain(costssline.values(), cumcostssline.values(), perturblines.values(), pointssline.values(), trailssline.values(), logcostssline.values(), logcumcostssline.values())\n\n print(\"🧛 reanimating :) meanwhile...\")\n livedraw(99)\n plt.show()\n\n from matplotlib import animation\n anim = animation.FuncAnimation(fig, livedraw, frames=100, interval=50, blit=True)\n from IPython.display import HTML\n display.clear_output(wait=True)\n return HTML(anim.to_html5_video())",
"_____no_output_____"
]
],
[
[
"## A simple dynamical system\nDefines a discrete-time [double-integrator](https://en.wikipedia.org/wiki/Double_integrator) -- a simple linear dynamical system that mirrors 1d kinematics -- along with a quadratic cost.\n\nBelow $\\mathbf{x}_t$ is the state, $\\mathbf{u}_t$ is the control input (or action), $\\mathbf{w}_t$ is the disturbance.\n\n$$ \\mathbf{x}_{t+1} = A\\mathbf{x}_t + B\\mathbf{u}_t + \\mathbf{w}_t, \\qquad c(\\mathbf{x},\\mathbf{u}) = \\mathbf{x}^\\top Q \\mathbf{x} + \\mathbf{u}^\\top R \\mathbf{u}$$\n\n$$ A = \\begin{bmatrix}\n1 & 1\\\\\n0 & 1\n\\end{bmatrix},\\quad B = \\begin{bmatrix}\n0\\\\\n1\n\\end{bmatrix}, \\quad Q = \\begin{bmatrix}\n1 & 0\\\\\n0 & 1\n\\end{bmatrix}, \\quad R = \\begin{bmatrix}\n1\n\\end{bmatrix}$$\n\nIn the task of controller verification, the **verifier** selects $\\mathbf{w}_t$ adaptively as a function of past state-action pairs $(\\mathbf{x}_s,\\mathbf{u}_s:s\\leq t)$.",
"_____no_output_____"
]
],
[
[
"dx, du, T = 2, 1, 100\nA, B = np.array([[1.0, 1.0], [0.0, 1.0]]), np.array([[0.0], [1.0]])\nQ, R = np.eye(dx), np.eye(du)\n\ndyn = lambda x, u, w, t: A @ x + B @ u + w\ncost = lambda x, u, t: x.T @ A @ x + u.T @ R @ u\n\n# A basic control loop. \n# (x, z) is the environ-controller state.\n# w is disturbance and z_w disturbance generator state\ndef eval(control, disturbance):\n x, z, z_w = np.zeros(dx), None, None\n for t in range(T):\n u, z = control(x, z, t)\n w, z_w = disturbance(x, u, z_w, t)\n c = cost(x, u, t)\n yield (x, u, w, c)\n x = dyn(x, u, w, t)",
"WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
]
],
[
[
"## Control Algorithms\nThe segment below puts forth a few basic control strategies, whose performance characteristics we would like to verify.\n\n+ **Zero Control**: Executes $\\mathbf{u}=\\mathbf{0}$.\n+ **LQR / H2**: A discrete-time [linear-quadratic regulator](https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator).\n+ **Finite-horizon LQR / H2**: A finite-horizon variant of the above.\n+ **Robust / $H_\\infty$ Control**: A worst-case [robust](https://en.wikipedia.org/wiki/H-infinity_methods_in_control_theory) controller.\n+ **GPC**: [Gradient-perturbation](https://arxiv.org/abs/1902.08721) controller.",
"_____no_output_____"
]
],
[
[
"#@title\n\ndef zero():\n return lambda x, z, t: (np.zeros(du), z)\n\n\ndef h2(A=A, B=B, Q=Q, R=R):\n P = dare(A, B, Q, R)\n K = - inv(R + B.T @ P @ B) @ (B.T @ P @ A)\n return lambda x, z, t: (K @ x, z)\n\n\ndef h2nonstat(A=A, B=B, Q=Q, R=R, T=T):\n dx, du = B.shape\n P, K = [np.zeros((dx, dx)) for _ in range(T + 1)], [np.zeros((du, dx)) for _ in range(T)]\n P[T] = Q\n for t in range(T - 1, -1, -1):\n P[t] = Q + A.T @ P[t + 1] @ A - (A.T @ P[t + 1] @ B) @ inv(R + B.T @ P[t + 1] @ B) @ (B.T @ P[t + 1] @ A)\n K[t] = - inv(R + B.T @ P[t + 1] @ B) @ (B.T @ P[t + 1] @ A)\n return lambda x, z, t: (K[t] @ x, z)\n\n\ndef hinf(A=A, B=B, Q=Q, R=R, T=T, gamma=1.0):\n dx, du = B.shape\n P, K = [np.zeros((dx, dx)) for _ in range(T + 1)], [np.zeros((du, dx)) for _ in range(T)], \n P[T] = Q\n for t in range(T - 1, -1, -1):\n Lambda = np.eye(dx) + (B @ inv(R) @ B.T - gamma ** -2 * np.eye(dx)) @ P[t + 1]\n P[t] = Q + A.T @ P[t + 1] @ pinv(Lambda) @ A\n K[t] = - np.linalg.inv(R) @ B.T @ P[t + 1] @ pinv(Lambda) @ A\n return lambda x, z, t: (K[t] @ x, z)\n\n\ndef gpc(A=A, B=B, Q=Q, R=R, T=T, H=3, M=3, lr=0.01, dyn=dyn, cost=cost):\n dx, du = B.shape\n P = dare(A, B, Q, R)\n K = - np.array(inv(R + B.T @ P @ B) @ (B.T @ P @ A))\n\n def proxy(E, off, W):\n y = np.zeros(dx)\n for h in range(H):\n v = K @ y + np.tensordot(E, W[h: h + M], axes=([0, 2], [0, 1]))\n y = dyn(y, v, W[h + M], h + M)\n v = K @ y + np.tensordot(E, W[h: h + M], axes=([0, 2], [0, 1]))\n c = cost(y, v, None)\n return c\n\n proxygrad = jit(grad(proxy, argnums=(0, 1)))\n\n def gpc_u(x, z, t):\n if z is None or t == 0:\n z = np.zeros(dx), np.zeros(du), np.zeros((H + M, dx)), np.zeros((M, du, dx)), np.zeros(du)\n xprev, uprev, W, E, off = z\n W = jax.ops.index_update(W, 0, x - A @ xprev - B @ uprev)\n W = np.roll(W, -1, axis=0)\n if t >= H + M:\n Edelta, offdelta = proxygrad(E, off, W)\n E -= lr * Edelta\n off -= lr * offdelta\n u = K @ x + np.tensordot(E, W[-M:], axes=([0, 2], [0, 1])) + off\n return u, (x, u, W, E, off)\n\n return gpc_u\n\ndef controllers(gamma, H, M, lr):\n return {\n \"Zero Control\": zero(),\n \"LQR / H2\": h2(),\n \"Finite-horizon LQR / H2\": h2nonstat(),\n \"Robust / Hinf Control\": hinf(gamma=gamma),\n \"GPC\": gpc(H=H, M=M, lr=lr),\n }",
"_____no_output_____"
]
],
[
[
"## [Memory Online Trust Region](https://arxiv.org/abs/2012.06695) (**MOTR**) disturbances\n\nThis is an online learning approach to disturbance generation, akin to nonstochastic control but with the role of control and disturbance swapped.",
"_____no_output_____"
]
],
[
[
"# Author: Udaya Ghai ([email protected])\ndef motr(A=A, B=B, Q=Q, R=R, r_off=0.5, r_E= 1.0, T=T, H=3, M=3, lr=0.001, dyn=dyn, cost=cost):\n dx, du = B.shape\n\n def proxy(E, off, U, X):\n x = X[0]\n for h in range(H):\n w = np.tensordot(E, U[h: h + M], axes=([0, 2], [0, 1])) + off\n x = dyn(x, U[h + H], w, h+M)\n return np.sum(x.T @ Q @ x)\n\n proxygrad = jit(grad(proxy, argnums=(0, 1)))\n proxyhess = jit(hessian(proxy))\n\n def project(x, r):\n norm_x = np.linalg.norm(x)\n return x if norm_x < r else (r / norm_x) * x\n\n def motr_w(x, u, z_w, t):\n if z_w is None or t == 0:\n z_w = np.zeros((H+M, du, 1)),np.zeros((H, dx, 1)), np.zeros((M, dx, du)), np.ones((dx, 1))\n U, X, E, off = z_w\n\n U = jax.ops.index_update(U, 0, u)\n U = np.roll(U, -1, axis=0)\n X = jax.ops.index_update(X, 0, np.reshape(x, (dx,1)))\n X = np.roll(X, -1, axis=0)\n\n if t >= H + M:\n Edelta, offdelta = proxygrad(E, off, U, X)\n\n E = project(E + lr*Edelta, r_E)\n off = project(off + lr * offdelta, r_off)\n\n w = np.tensordot(E, U[-M:], axes=([0, 2], [0, 1])) + off\n return np.squeeze(w), (U, X, E, off)\n \n return motr_w",
"_____no_output_____"
],
[
"#@title MOTR Pertrubation\n#@markdown Environment Parameters\nmotr_offset_radius = 1 #@param {type:\"slider\", min:0, max:2, step:0.01}\nmotr_radius = 0.4 #@param {type:\"slider\", min:0, max:2, step:0.01}\nmotr_lookback = 5 #@param {type:\"slider\", min:1, max:20, step:1}\nmotr_memory = 5 #@param {type:\"slider\", min:1, max:20, step:1}\nmotr_gen = motr(r_off=motr_offset_radius, r_E=motr_radius, M=motr_memory, H=motr_lookback)\n\n#@markdown Constant Pertrubation: Control parameters\nhinf_log_gamma = 2 #@param {type:\"slider\", min:-2, max:5, step:0.01}\nhinf_gamma = 10**(hinf_log_gamma)\ngpc_lookback = 5 #@param {type:\"slider\", min:1, max:20, step:1}\ngpc_memory = 5 #@param {type:\"slider\", min:1, max:20, step:1}\ngpc_log_lr = -3 #@param {type:\"slider\", min:-5, max:0, step:0.01}\ngpc_lr = 10**(gpc_log_lr)\n\nCs = controllers(hinf_gamma, gpc_lookback, gpc_memory, gpc_lr)\n\nprint(\"🧛 evaluating controllers\")\ntraces = {Cstr: list(zip(*eval(C, motr_gen))) for Cstr, C in Cs.items()}\nxss = valmap(lambda x: x[0], traces)\nuss = valmap(lambda x: x[1], traces)\nwss = valmap(lambda x: x[2], traces)\ncostss = valmap(lambda x: x[3], traces)\n\nliveplot(costss, xss, wss, 250, 200, 4, 20, 10**5, 10**5)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
d0d5f4d7fa9bf5c9cb95757b852ec758fd27a530 | 22,912 | ipynb | Jupyter Notebook | lab7/LogisticRegression-Tweets.ipynb | modhvadiyac25/077_ChetanModhavadiya | bfc56063f537112c65fee0609996fdb22c74930f | [
"MIT"
] | null | null | null | lab7/LogisticRegression-Tweets.ipynb | modhvadiyac25/077_ChetanModhavadiya | bfc56063f537112c65fee0609996fdb22c74930f | [
"MIT"
] | null | null | null | lab7/LogisticRegression-Tweets.ipynb | modhvadiyac25/077_ChetanModhavadiya | bfc56063f537112c65fee0609996fdb22c74930f | [
"MIT"
] | null | null | null | 22,912 | 22,912 | 0.621334 | [
[
[
"# Aim:\n* Extract features for logistic regression given some text\n* Implement logistic regression from scratch\n* Apply logistic regression on a natural language processing task\n* Test logistic regression\n\nWe will be using a data set of tweets.",
"_____no_output_____"
],
[
"## Import functions and data",
"_____no_output_____"
]
],
[
[
"import nltk\r\nfrom nltk.corpus import twitter_samples \r\nimport pandas as pd",
"_____no_output_____"
],
[
"\r\nnltk.download('twitter_samples')\r\nnltk.download('stopwords')",
"[nltk_data] Downloading package twitter_samples to\n[nltk_data] C:\\Users\\modhv\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package twitter_samples is already up-to-date!\n[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\modhv\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
],
[
"import re\r\nimport string\r\nimport numpy as np\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.tokenize import TweetTokenizer",
"_____no_output_____"
],
[
"#process_tweet(): cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.\r\ndef process_tweet(tweet):\r\n \"\"\"Process tweet function.\r\n Input:\r\n tweet: a string containing a tweet\r\n Output:\r\n tweets_clean: a list of words containing the processed tweet\r\n\r\n \"\"\"\r\n stemmer = PorterStemmer()\r\n stopwords_english = stopwords.words('english')\r\n\r\n # remove stock market tickers like $GE\r\n tweet = re.sub(r'\\$\\w*', '', tweet)\r\n # remove old style retweet text \"RT\"\r\n tweet = re.sub(r'^RT[\\s]+', '', tweet)\r\n # remove hyperlinks\r\n tweet = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', tweet)\r\n # remove hashtags\r\n # only removing the hash # sign from the word\r\n tweet = re.sub(r'#', '', tweet)\r\n # tokenize tweets\r\n\r\n\r\n tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,\r\n reduce_len=True)\r\n tweet_tokens = tokenizer.tokenize(tweet)\r\n\r\n tweets_clean = []\r\n for word in tweet_tokens:\r\n if(word not in stopwords_english and word not in string.punctuation):\r\n stem_word = stemmer.stem(word)\r\n tweets_clean.append(stem_word)\r\n \r\n #############################################################\r\n # 1 remove stopwords\r\n # 2 remove punctuation\r\n # 3 stemming word\r\n # 4 Add it to tweets_clean\r\n\r\n return tweets_clean",
"_____no_output_____"
],
[
"#build_freqs counts how often a word in the 'corpus' (the entire set of tweets) was associated with\r\n # a positive label '1' or \r\n # a negative label '0', \r\n\r\n#then builds the freqs dictionary, where each key is a (word,label) tuple, \r\n\r\n#and the value is the count of its frequency within the corpus of tweets.\r\n\r\ndef build_freqs(tweets, ys):\r\n \"\"\"Build frequencies.\r\n Input:\r\n tweets: a list of tweets\r\n ys: an m x 1 array with the sentiment label of each tweet\r\n (either 0 or 1)\r\n Output:\r\n freqs: a dictionary mapping each (word, sentiment) pair to its\r\n frequency\r\n \"\"\"\r\n # Convert np array to list since zip needs an iterable.\r\n # The squeeze is necessary or the list ends up with one element.\r\n # Also note that this is just a NOP if ys is already a list.\r\n yslist = np.squeeze(ys).tolist()\r\n\r\n # Start with an empty dictionary and populate it by looping over all tweets\r\n # and over all processed words in each tweet.\r\n freqs = {}\r\n\r\n for y, tweet in zip(yslist, tweets):\r\n for word in process_tweet(tweet):\r\n pair = (word, y)\r\n \r\n #############################################################\r\n #Update the count of pair if present, set it to 1 otherwise\r\n if pair in freqs:\r\n freqs[pair] += 1\r\n else:\r\n freqs[pair] = 1\r\n\r\n return freqs",
"_____no_output_____"
]
],
[
[
"### Prepare the data\n* The `twitter_samples` contains subsets of 5,000 positive tweets, 5,000 negative tweets, and the full set of 10,000 tweets. ",
"_____no_output_____"
]
],
[
[
"# select the set of positive and negative tweets\r\nall_positive_tweets = twitter_samples.strings('positive_tweets.json')\r\nall_negative_tweets = twitter_samples.strings('negative_tweets.json')",
"_____no_output_____"
]
],
[
[
"* Train test split: 20% will be in the test set, and 80% in the training set.\n",
"_____no_output_____"
]
],
[
[
"# split the data into two pieces, one for training and one for testing\r\n#############################################################\r\ntest_pos = all_positive_tweets[4000:]\r\ntrain_pos = all_positive_tweets[:4000]\r\ntest_neg = all_negative_tweets[4000:]\r\ntrain_neg = all_negative_tweets[:4000] \r\n\r\ntrain_x = train_pos + train_neg\r\ntest_x = test_pos + test_neg",
"_____no_output_____"
]
],
[
[
"* Create the numpy array of positive labels and negative labels.",
"_____no_output_____"
]
],
[
[
"# combine positive and negative labels\r\ntrain_y = np.append(np.ones((len(train_pos), 1)), np.zeros((len(train_neg), 1)), axis=0)\r\ntest_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)",
"_____no_output_____"
]
],
[
[
"* Create the frequency dictionary using the `build_freqs()` function. \n \n",
"_____no_output_____"
]
],
[
[
"# create frequency dictionary\r\n#############################################################\r\nfreqs = build_freqs(train_x,train_y)\r\n\r\n# check the output\r\nprint(\"type(freqs) = \" + str(type(freqs)))\r\nprint(\"len(freqs) = \" + str(len(freqs.keys())))",
"type(freqs) = <class 'dict'>\nlen(freqs) = 11339\n"
]
],
[
[
"* HERE, The `freqs` dictionary is the frequency dictionary that's being built. \r\n* The key is the tuple (word, label), such as (\"happy\",1) or (\"happy\",0). The value stored for each key is the count of how many times the word \"happy\" was associated with a positive label, or how many times \"happy\" was associated with a negative label.",
"_____no_output_____"
],
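[
"For example, after building the dictionary you can inspect individual counts directly. A small illustrative check (the stemmed token 'happi' is what the Porter stemmer produces for \"happy\"; the exact counts printed depend on the data split):\r\n\r\n```python\r\n# .get with a default of 0 also covers (word, label) pairs that never occurred\r\nprint(freqs.get(('happi', 1.0), 0))  # times 'happi' appeared in positive tweets\r\nprint(freqs.get(('happi', 0.0), 0))  # times 'happi' appeared in negative tweets\r\n```",
"_____no_output_____"
],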
[
"Process tweet",
"_____no_output_____"
]
],
[
[
"# Example\r\nprint('This is an example of a positive tweet: \\n', train_x[0])\r\nprint('\\nThis is an example of the processed version of the tweet: \\n', process_tweet(train_x[0]))",
"This is an example of a positive tweet: \n #FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :)\n\nThis is an example of the processed version of the tweet: \n ['followfriday', 'top', 'engag', 'member', 'commun', 'week', ':)']\n"
]
],
[
[
"#Logistic regression :\n\n\n### Sigmoid\n\n$$ h(z) = \\frac{1}{1+\\exp^{-z}} $$\n\nIt maps the input 'x' to a value that ranges between 0 and 1, and so it can be treated as a probability. \n\n\n",
"_____no_output_____"
]
],
[
[
"def sigmoid(z): \r\n # calculate the sigmoid of z\r\n #############################################################\r\n h = 1/(1+np.exp(-z))\r\n return h",
"_____no_output_____"
]
],
[
[
"### Logistic regression: regression and a sigmoid\n\nLogistic regression takes a regular linear regression, and applies a sigmoid to the output of the linear regression.\n\nLogistic regression\n$$ h(z) = \\frac{1}{1+\\exp^{-z}}$$\n$$z = \\theta_0 x_0 + \\theta_1 x_1 + \\theta_2 x_2 + ... \\theta_N x_N$$\n",
"_____no_output_____"
],
[
"#### Update the weights:Gradient Descent\n\n\n$$\\nabla_{\\theta_j}J(\\theta) = \\frac{1}{m} \\sum_{i=1}^m(h^{(i)}-y^{(i)})x_j $$\n\n* To update the weight $\\theta_j$, we adjust it by subtracting a fraction of the gradient determined by $\\alpha$:\n$$\\theta_j = \\theta_j - \\alpha \\times \\nabla_{\\theta_j}J(\\theta) $$\n\n* The learning rate $\\alpha$ is a value that we choose to control how big a single update will be.\n",
"_____no_output_____"
]
],
[
[
"def gradientDescent(x, y, theta, alpha, num_iters):\r\n \r\n # get 'm', the number of rows in matrix x\r\n m = len(x)\r\n for i in range(0, num_iters):\r\n \r\n # get z, the dot product of x and theta\r\n #############################################################\r\n z = np.dot(x,theta)\r\n \r\n # get the sigmoid of z\r\n #############################################################\r\n h = sigmoid(z)\r\n \r\n # calculate the cost function\r\n J = (-1/m)*(y.T @ np.log(h) + (1-y).T @ np.log(1-h))\r\n\r\n # update the weights theta\r\n #############################################################\r\n grad = (1/m) * np.dot(x.T, h-y)\r\n theta -= (alpha * grad)\r\n \r\n J = float(J)\r\n return J, theta",
"_____no_output_____"
]
],
[
[
"## Extracting the features\n\n* Given a list of tweets, extract the features and store them in a matrix. You will extract two features.\n * The first feature is the number of positive words in a tweet.\n * The second feature is the number of negative words in a tweet. \n* Then train your logistic regression classifier on these features.\n* Test the classifier on a validation set. \n",
"_____no_output_____"
]
],
[
[
"def extract_features(tweet, freqs):\r\n '''\r\n Input: \r\n tweet: a list of words for one tweet\r\n freqs: a dictionary corresponding to the frequencies of each tuple (word, label)\r\n Output: \r\n x: a feature vector of dimension (1,3)\r\n '''\r\n # tokenizes, stems, and removes stopwords\r\n #############################################################\r\n word_l = process_tweet(tweet)\r\n \r\n # 3 elements in the form of a 1 x 3 vector\r\n x = np.zeros((1, 3)) \r\n \r\n #bias term is set to 1\r\n x[0,0] = 1 \r\n \r\n # loop through each word in the list of words\r\n for word in word_l:\r\n \r\n # increment the word count for the positive label 1\r\n #############################################################\r\n x[0,1] += freqs.get((word,1.0),0) \r\n # increment the word count for the negative label 0\r\n #############################################################\r\n x[0,2] += freqs.get((word,0.0),0)\r\n \r\n assert(x.shape == (1, 3))\r\n return x",
"_____no_output_____"
],
[
"# Check the function\r\n\r\n# test 1\r\n# test on training data\r\ntmp1 = extract_features(train_x[0], freqs)\r\nprint(tmp1)",
"[[1.00e+00 3.02e+03 6.10e+01]]\n"
],
[
"# test 2:\r\n# check for when the words are not in the freqs dictionary\r\ntmp2 = extract_features('Hariom pandya', freqs)\r\nprint(tmp2)",
"[[1. 0. 0.]]\n"
]
],
[
[
"## Training Your Model\n\nTo train the model:\n* Stack the features for all training examples into a matrix `X`. \n* Call `gradientDescent`",
"_____no_output_____"
]
],
[
[
"# collect the features 'x' and stack them into a matrix 'X'\r\nX = np.zeros((len(train_x), 3))\r\nfor i in range(len(train_x)):\r\n X[i, :]= extract_features(train_x[i], freqs)\r\n\r\n# training labels corresponding to X\r\nY = train_y\r\n\r\n# Apply gradient descent\r\nJ, theta = gradientDescent(X, Y, np.zeros((3, 1)), 1e-9, 1500)\r\nprint(f\"The cost after training is {J:.8f}.\")\r\n",
"The cost after training is 0.24215613.\n"
]
],
[
[
"# Test logistic regression\n\nPredict whether a tweet is positive or negative.\n\n* Given a tweet, process it, then extract the features.\n* Apply the model's learned weights on the features to get the logits.\n* Apply the sigmoid to the logits to get the prediction (a value between 0 and 1).\n\n$$y_{pred} = sigmoid(\\mathbf{x} \\cdot \\theta)$$",
"_____no_output_____"
]
],
[
[
"def predict_tweet(tweet, freqs, theta):\r\n '''\r\n Input: \r\n tweet: a string\r\n freqs: a dictionary corresponding to the frequencies of each tuple (word, label)\r\n theta: (3,1) vector of weights\r\n Output: \r\n y_pred: the probability of a tweet being positive or negative\r\n '''\r\n \r\n # extract the features of the tweet and store it into x\r\n #############################################################\r\n x = extract_features(tweet,freqs)\r\n \r\n # make the prediction using x and theta\r\n #############################################################\r\n z = np.dot(x,theta)\r\n y_pred = sigmoid(z)\r\n \r\n \r\n return y_pred",
"_____no_output_____"
],
[
"# Run this cell to test your function\r\nfor tweet in ['I am happy', 'I am bad', 'this movie should have been great.', 'great', 'great great', 'great great great', 'great great great great']:\r\n print( '%s -> %f' % (tweet, predict_tweet(tweet, freqs, theta)))",
"I am happy -> 0.518581\nI am bad -> 0.494339\nthis movie should have been great. -> 0.515331\ngreat -> 0.515464\ngreat great -> 0.530899\ngreat great great -> 0.546275\ngreat great great great -> 0.561562\n"
]
],
[
[
"## Check performance using the test set\n",
"_____no_output_____"
]
],
[
[
"def test_logistic_regression(test_x, test_y, freqs, theta):\r\n \"\"\"\r\n Input: \r\n test_x: a list of tweets\r\n test_y: (m, 1) vector with the corresponding labels for the list of tweets\r\n freqs: a dictionary with the frequency of each pair (or tuple)\r\n theta: weight vector of dimension (3, 1)\r\n Output: \r\n accuracy: (# of tweets classified correctly) / (total # of tweets)\r\n \"\"\"\r\n \r\n \r\n # the list for storing predictions\r\n y_hat = []\r\n \r\n for tweet in test_x:\r\n # get the label prediction for the tweet\r\n y_pred = predict_tweet(tweet, freqs, theta)\r\n \r\n if y_pred > 0.5:\r\n # append 1.0 to the list\r\n y_hat.append(1)\r\n else:\r\n # append 0 to the list\r\n y_hat.append(0)\r\n\r\n # With the above implementation, y_hat is a list, but test_y is (m,1) array\r\n # convert both to one-dimensional arrays in order to compare them using the '==' operator\r\n count=0\r\n y_hat=np.array(y_hat)\r\n m=len(test_y)\r\n print(m)\r\n \r\n test_y=np.reshape(test_y,m)\r\n print(y_hat.shape)\r\n print(test_y.shape)\r\n \r\n accuracy = ((test_y == y_hat).sum())/m\r\n \r\n return accuracy",
"_____no_output_____"
],
[
"tmp_accuracy = test_logistic_regression(test_x, test_y, freqs, theta)\r\nprint(f\"Logistic regression model's accuracy = {tmp_accuracy:.4f}\")",
"2000\n(2000,)\n(2000,)\nLogistic regression model's accuracy = 0.9950\n"
]
],
[
[
"#Lab Assignment:\n\n##Replace Manual version of Logistic Regression with TF based version. \n####[Reference : Lab-6]",
"_____no_output_____"
]
],
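[
[
"# Lab assignment sketch (an assumption, not the official Lab-6 solution): the manual\r\n# sigmoid/gradientDescent pair is replaced by a one-unit Keras model with a sigmoid\r\n# activation on the same 3 extracted features. The optimizer, learning rate and\r\n# epoch count below are illustrative choices.\r\nimport tensorflow as tf\r\n\r\ntf_model = tf.keras.Sequential([\r\n    tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(3,))\r\n])\r\ntf_model.compile(optimizer=tf.keras.optimizers.Adam(0.01),\r\n                 loss='binary_crossentropy',\r\n                 metrics=['accuracy'])\r\n\r\n# X (m, 3) and train_y (m, 1) were built in the training cells above\r\ntf_model.fit(X, train_y, epochs=100, verbose=0)\r\nprint(tf_model.evaluate(X, train_y, verbose=0))",
"_____no_output_____"
]
]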
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
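[
"code"
]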
] |
d0d5fcd8b01080d13bbbd4d55b0e5d5eb71b296f | 310,258 | ipynb | Jupyter Notebook | BME590_Final_Project_keras.ipynb | tiffanylin43/BME590L_Final_Project_addon | 6cfd501a8253597fe92abc9ea2fb5effb97e8af3 | [
"MIT"
] | null | null | null | BME590_Final_Project_keras.ipynb | tiffanylin43/BME590L_Final_Project_addon | 6cfd501a8253597fe92abc9ea2fb5effb97e8af3 | [
"MIT"
] | null | null | null | BME590_Final_Project_keras.ipynb | tiffanylin43/BME590L_Final_Project_addon | 6cfd501a8253597fe92abc9ea2fb5effb97e8af3 | [
"MIT"
] | null | null | null | 215.606671 | 22,072 | 0.854882 | [
[
[
"# import data",
"_____no_output_____"
]
],
[
[
"import os\nimport glob\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\ndef read_feature(folder, num):\n filename = glob.glob(os.path.join(folder, '*'))\n img_arr = np.zeros([len(filename), 100, 100, 3])\n label = num * np.ones(len(filename), dtype=\"float32\")\n for i, name in enumerate(filename):\n img = Image.open(name)\n img_arr[i, :, :, :] = np.asarray(img, dtype=\"uint8\")\n return img_arr, label",
"_____no_output_____"
],
[
"tb_img_arr, tb_label = read_feature('./TB_Image', 1)\nnon_tb_img_arr, non_tb_label = read_feature('./Non-TB_Image', 0)\nimages = np.concatenate((tb_img_arr, non_tb_img_arr))\nlabels = np.concatenate((tb_label, non_tb_label))\n\nprint(np.shape(images))\nprint(np.shape(labels))\nX_train, X_val, y_train, y_val = train_test_split(images, labels, test_size=0.1)\n\nX_train = X_train.astype(np.int)\nX_val = X_val.astype(np.int)\ny_train = y_train.astype(np.int)\ny_val = y_val.astype(np.int)\n\n# change into one-hot vector\ny_train = tf.keras.utils.to_categorical(y_train, 2) \ny_val = tf.keras.utils.to_categorical(y_val, 2)\n\n# reshape dataset\nX_train = X_train.reshape(X_train.shape[0], 100, 100, 3)\nX_val = X_val.reshape(X_val.shape[0], 100, 100, 3)\n\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\nprint('Training data shape', X_train.shape)\n_, (ax1, ax2) = plt.subplots(1, 2)\nax1.imshow(X_train[0].reshape(100, 100, 3), cmap=plt.cm.Greys);\nax2.imshow(X_train[1].reshape(100, 100, 3), cmap=plt.cm.Greys);",
"(8076, 100, 100, 3)\n(8076,)\nTraining data shape (7268, 100, 100, 3)\n"
]
],
[
[
"## Define trainning function",
"_____no_output_____"
]
],
[
[
"def train_data(model):\n loss = []\n acc = []\n val_loss = []\n val_acc = []\n early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=3)\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='logs/{}'.format('model_name'))\n hist = model.fit(X_train, y_train,\n batch_size=64,\n epochs=50, # Run thru all the data point in each epoch\n verbose=1,\n validation_data=(X_val, y_val),\n #callbacks=[tensorboard])\n callbacks=[early_stop, tensorboard])\n #val_err.append(hist.history['val_mean_absolute_error'][-1]) # a dict\n loss.append(hist.history['loss'][-1])\n val_loss.append(hist.history['val_loss'][-1])\n acc.append(hist.history['acc'][-1])\n val_acc.append(hist.history['val_acc'][-1]) \n \n return loss, val_loss, hist",
"_____no_output_____"
]
],
[
[
"## Define a VGG network",
"_____no_output_____"
]
],
[
[
"def VGG(activ):\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=activ, input_shape=(100, 100, 3)),\n tf.keras.layers.MaxPool2D(padding='same'),\n\n tf.keras.layers.Conv2D(128, (3,3), padding='same', activation=activ),\n tf.keras.layers.MaxPool2D(padding='same'),\n \n tf.keras.layers.Conv2D(256, (3,3), padding='same', activation=activ),\n tf.keras.layers.Conv2D(256, (3,3), padding='same', activation=activ),\n tf.keras.layers.MaxPool2D(padding='same'),\n \n tf.keras.layers.Conv2D(512, (3,3), padding='same', activation=activ),\n tf.keras.layers.Conv2D(512, (3,3), padding='same', activation=activ),\n tf.keras.layers.MaxPool2D(padding='same'),\n\n tf.keras.layers.Conv2D(512, (3,3), padding='same', activation=activ),\n tf.keras.layers.Conv2D(512, (3,3), padding='same', activation=activ),\n tf.keras.layers.MaxPool2D(padding='same'),\n\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation=activ),\n tf.keras.layers.Dense(4096, activation=activ),\n tf.keras.layers.Dense(1000, activation=activ),\n tf.keras.layers.Dense(2, activation='softmax')\n ])\n param = model.count_params()\n model.compile(optimizer=tf.train.AdamOptimizer(0.000001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n\n model.summary()\n \n return model, param",
"_____no_output_____"
]
],
[
[
"## Define a DNN model",
"_____no_output_____"
]
],
[
[
"def dnnmodel(n, activ):\n param = []\n model = tf.keras.Sequential([])\n model.add(tf.keras.layers.Flatten(input_shape=(100, 100, 3)))\n for i in range(n):\n model.add(tf.keras.layers.Dense(100, activation=activ))\n model.add(tf.keras.layers.Dense(2, activation='softmax'))\n # model.summary()\n # model.count_params()\n param.append(model.count_params())\n model.compile(optimizer=tf.train.AdamOptimizer(0.000001),\n loss='categorical_crossentropy',\n metrics=['accuracy', 'mae'])\n return model, param",
"_____no_output_____"
]
],
[
[
"## Trainning with VGG",
"_____no_output_____"
],
[
"### VGG with activation \"relu\"",
"_____no_output_____"
]
],
[
[
"activ = 'relu'\nmodel_VGG1, param_VGG1 = VGG(activ)\nloss_VGG1, val_loss_VGG1, hist_VGG1= train_data(model_VGG1)",
"WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 100, 100, 64) 1792 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 25, 25, 256) 590080 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 13, 13, 256) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 13, 13, 512) 1180160 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 13, 13, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 7, 7, 512) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 7, 7, 512) 2359808 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 7, 7, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 4, 4, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 8192) 0 \n_________________________________________________________________\ndense (Dense) (None, 4096) 33558528 \n_________________________________________________________________\ndense_1 (Dense) (None, 4096) 16781312 \n_________________________________________________________________\ndense_2 (Dense) (None, 1000) 4097000 \n_________________________________________________________________\ndense_3 (Dense) (None, 2) 2002 \n=================================================================\nTotal params: 63,659,322\nTrainable params: 63,659,322\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 7268 samples, validate on 808 samples\nWARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nEpoch 1/50\n7268/7268 [==============================] - 26s 4ms/sample - loss: 0.6770 - acc: 0.5886 - val_loss: 0.6501 - val_acc: 0.5879\nEpoch 2/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.6139 - acc: 0.6896 - val_loss: 0.5899 - val_acc: 0.7735\nEpoch 3/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.5676 - acc: 0.7452 - val_loss: 0.5390 - val_acc: 0.7550\nEpoch 4/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.5160 - acc: 0.7910 - val_loss: 0.4911 - val_acc: 0.7834\nEpoch 5/50\n7268/7268 [==============================] - 
8s 1ms/sample - loss: 0.4548 - acc: 0.8415 - val_loss: 0.4265 - val_acc: 0.8750\nEpoch 6/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.3940 - acc: 0.8771 - val_loss: 0.3609 - val_acc: 0.8899\nEpoch 7/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.3328 - acc: 0.9104 - val_loss: 0.3045 - val_acc: 0.9208\nEpoch 8/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.2870 - acc: 0.9203 - val_loss: 0.2729 - val_acc: 0.9047\nEpoch 9/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.2531 - acc: 0.9263 - val_loss: 0.2388 - val_acc: 0.9282\nEpoch 10/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.2214 - acc: 0.9377 - val_loss: 0.2143 - val_acc: 0.9307\nEpoch 11/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.2046 - acc: 0.9406 - val_loss: 0.1979 - val_acc: 0.9431\nEpoch 12/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.1965 - acc: 0.9411 - val_loss: 0.2240 - val_acc: 0.9121\nEpoch 13/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.1864 - acc: 0.9446 - val_loss: 0.1915 - val_acc: 0.9307\nEpoch 14/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1785 - acc: 0.9444 - val_loss: 0.1735 - val_acc: 0.9381\nEpoch 15/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.1702 - acc: 0.9479 - val_loss: 0.1766 - val_acc: 0.9505\nEpoch 16/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.1683 - acc: 0.9451 - val_loss: 0.1759 - val_acc: 0.9517\nEpoch 17/50\n7268/7268 [==============================] - 8s 1ms/sample - loss: 0.1653 - acc: 0.9473 - val_loss: 0.1777 - val_acc: 0.9493\n"
]
],
[
[
"### Define the function for plots",
"_____no_output_____"
]
],
[
[
"def plot_acc_and_loss(hist):\n acc = hist.history['acc']\n loss = hist.history['loss']\n val_acc = hist.history['val_acc']\n val_loss = hist.history['val_loss']\n \n plt.plot(acc, 'r-o')\n plt.title(\"Trainning accuracy\")\n plt.show()\n \n plt.plot(loss, 'g-o')\n plt.title(\"Trainning loss\")\n plt.show()\n \n plt.plot(val_acc, 'b-o')\n plt.title(\"Validation accuracy\")\n plt.show()\n \n plt.plot(val_loss, 'm-o')\n plt.title(\"Validation loss\")\n plt.show()",
"_____no_output_____"
],
[
"plot_acc_and_loss(hist_VGG1)",
"_____no_output_____"
]
],
[
[
"### Calculate sensitivity and specificity",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\n\npredictions = model_VGG1.predict(X_val)\ny_val = np.argmax(y_val, axis=-1)\npredictions = np.argmax(predictions, axis=-1)\nc = confusion_matrix(y_val, predictions)\nprint('Confusion matrix:\\n', c)\nprint('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))\nprint('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))",
"Confusion matrix:\n [[428 29]\n [ 12 339]]\nsensitivity 0.936542669584245\nspecificity 0.9658119658119658\n"
]
],
[
[
"### VGG with activation \"relu\"",
"_____no_output_____"
]
],
[
[
"activ = 'tanh'\nmodel_VGG2, param_VGG2 = VGG(activ)\nloss_VGG2, val_loss_VGG2, hist_VGG2= train_data(model_VGG2)",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_24 (Conv2D) (None, 100, 100, 64) 1792 \n_________________________________________________________________\nmax_pooling2d_15 (MaxPooling (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_25 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nmax_pooling2d_16 (MaxPooling (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_26 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nconv2d_27 (Conv2D) (None, 25, 25, 256) 590080 \n_________________________________________________________________\nmax_pooling2d_17 (MaxPooling (None, 13, 13, 256) 0 \n_________________________________________________________________\nconv2d_28 (Conv2D) (None, 13, 13, 512) 1180160 \n_________________________________________________________________\nconv2d_29 (Conv2D) (None, 13, 13, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_18 (MaxPooling (None, 7, 7, 512) 0 \n_________________________________________________________________\nconv2d_30 (Conv2D) (None, 7, 7, 512) 2359808 \n_________________________________________________________________\nconv2d_31 (Conv2D) (None, 7, 7, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_19 (MaxPooling (None, 4, 4, 512) 0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 8192) 0 \n_________________________________________________________________\ndense_12 (Dense) (None, 4096) 33558528 \n_________________________________________________________________\ndense_13 (Dense) (None, 4096) 16781312 \n_________________________________________________________________\ndense_14 (Dense) (None, 1000) 4097000 \n_________________________________________________________________\ndense_15 (Dense) (None, 2) 2002 \n=================================================================\nTotal params: 63,659,322\nTrainable params: 63,659,322\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 7268 samples, validate on 808 samples\nEpoch 1/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.5975 - acc: 0.7005 - val_loss: 0.5156 - val_acc: 0.8032\nEpoch 2/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.4259 - acc: 0.8407 - val_loss: 0.3412 - val_acc: 0.8725\nEpoch 3/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.2821 - acc: 0.9077 - val_loss: 0.2568 - val_acc: 0.9233\nEpoch 4/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.2248 - acc: 0.9243 - val_loss: 0.2164 - val_acc: 0.9282\nEpoch 5/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.2053 - acc: 0.9293 - val_loss: 0.2032 - val_acc: 0.9344\nEpoch 6/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1957 - acc: 0.9326 - val_loss: 0.1972 - val_acc: 0.9270\nEpoch 7/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1897 - acc: 0.9338 - val_loss: 0.1967 - val_acc: 0.9332\nEpoch 8/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1853 - acc: 0.9367 - val_loss: 0.1895 - val_acc: 0.9356\nEpoch 9/50\n7268/7268 
[==============================] - 9s 1ms/sample - loss: 0.1811 - acc: 0.9386 - val_loss: 0.1883 - val_acc: 0.9381\nEpoch 10/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1786 - acc: 0.9411 - val_loss: 0.1860 - val_acc: 0.9369\nEpoch 11/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1754 - acc: 0.9412 - val_loss: 0.1810 - val_acc: 0.9431\nEpoch 12/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1724 - acc: 0.9414 - val_loss: 0.1803 - val_acc: 0.9394\nEpoch 13/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1686 - acc: 0.9422 - val_loss: 0.1804 - val_acc: 0.9394\nEpoch 14/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1660 - acc: 0.9455 - val_loss: 0.1784 - val_acc: 0.9381\nEpoch 15/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1658 - acc: 0.9443 - val_loss: 0.1767 - val_acc: 0.9381\nEpoch 16/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1599 - acc: 0.9481 - val_loss: 0.1783 - val_acc: 0.9455\nEpoch 17/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1591 - acc: 0.9487 - val_loss: 0.1718 - val_acc: 0.9418\nEpoch 18/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1552 - acc: 0.9481 - val_loss: 0.1740 - val_acc: 0.9431\nEpoch 19/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1531 - acc: 0.9494 - val_loss: 0.1713 - val_acc: 0.9356\nEpoch 20/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1505 - acc: 0.9505 - val_loss: 0.1674 - val_acc: 0.9431\nEpoch 21/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1506 - acc: 0.9516 - val_loss: 0.1671 - val_acc: 0.9406\nEpoch 22/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1463 - acc: 0.9516 - val_loss: 0.1675 - val_acc: 0.9406\nEpoch 23/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1441 - acc: 0.9534 - val_loss: 0.1628 - val_acc: 0.9418\nEpoch 24/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1425 - acc: 0.9545 - val_loss: 0.1612 - val_acc: 0.9443\nEpoch 25/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1386 - acc: 0.9565 - val_loss: 0.1607 - val_acc: 0.9406\nEpoch 26/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1373 - acc: 0.9558 - val_loss: 0.1612 - val_acc: 0.9418\nEpoch 27/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1347 - acc: 0.9571 - val_loss: 0.1582 - val_acc: 0.9493\nEpoch 28/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1316 - acc: 0.9586 - val_loss: 0.1687 - val_acc: 0.9319\nEpoch 29/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1347 - acc: 0.9545 - val_loss: 0.1601 - val_acc: 0.9394\nEpoch 30/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1290 - acc: 0.9584 - val_loss: 0.1519 - val_acc: 0.9480\nEpoch 31/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1257 - acc: 0.9606 - val_loss: 0.1557 - val_acc: 0.9480\nEpoch 32/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1268 - acc: 0.9602 - val_loss: 0.1527 - val_acc: 0.9480\nEpoch 33/50\n7268/7268 [==============================] - 9s 1ms/sample - loss: 0.1204 - acc: 0.9620 - val_loss: 0.1518 - val_acc: 0.9505\n"
],
[
"plot_acc_and_loss(hist_VGG2)",
"_____no_output_____"
],
[
"predictions = model_VGG2.predict(X_val)\ny_val1 = np.argmax(y_val, axis=-1)\npredictions = np.argmax(predictions, axis=-1)\nc = confusion_matrix(y_val1, predictions)\nprint('Confusion matrix:\\n', c)\nprint('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))\nprint('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))",
"Confusion matrix:\n [[450 18]\n [ 22 318]]\nsensitivity 0.9615384615384616\nspecificity 0.9352941176470588\n"
]
],
[
[
"## DNN",
"_____no_output_____"
]
],
[
[
"activ = 'relu'\nmodel_DNN, param1_DNN = dnnmodel(15, activ)\nloss_DNN, val_loss_DNN, hist_DNN= train_data(model_DNN)",
"Train on 7268 samples, validate on 808 samples\nEpoch 1/50\n7268/7268 [==============================] - 2s 258us/sample - loss: 0.9049 - acc: 0.5663 - mean_absolute_error: 0.4582 - val_loss: 0.7027 - val_acc: 0.5792 - val_mean_absolute_error: 0.4607\nEpoch 2/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.6930 - acc: 0.5663 - mean_absolute_error: 0.4657 - val_loss: 0.6696 - val_acc: 0.5792 - val_mean_absolute_error: 0.4636\nEpoch 3/50\n7268/7268 [==============================] - 1s 192us/sample - loss: 0.6710 - acc: 0.5663 - mean_absolute_error: 0.4636 - val_loss: 0.6516 - val_acc: 0.5792 - val_mean_absolute_error: 0.4555\nEpoch 4/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.6483 - acc: 0.5663 - mean_absolute_error: 0.4545 - val_loss: 0.6432 - val_acc: 0.5792 - val_mean_absolute_error: 0.4626\nEpoch 5/50\n7268/7268 [==============================] - 1s 199us/sample - loss: 0.6302 - acc: 0.5691 - mean_absolute_error: 0.4462 - val_loss: 0.6193 - val_acc: 0.6064 - val_mean_absolute_error: 0.4462\nEpoch 6/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.6164 - acc: 0.6069 - mean_absolute_error: 0.4391 - val_loss: 0.6072 - val_acc: 0.6188 - val_mean_absolute_error: 0.4244\nEpoch 7/50\n7268/7268 [==============================] - 1s 202us/sample - loss: 0.6061 - acc: 0.6657 - mean_absolute_error: 0.4327 - val_loss: 0.6288 - val_acc: 0.7067 - val_mean_absolute_error: 0.4612\nEpoch 8/50\n7268/7268 [==============================] - 1s 205us/sample - loss: 0.5873 - acc: 0.6962 - mean_absolute_error: 0.4237 - val_loss: 0.5995 - val_acc: 0.6287 - val_mean_absolute_error: 0.4046\nEpoch 9/50\n7268/7268 [==============================] - 2s 209us/sample - loss: 0.5751 - acc: 0.7208 - mean_absolute_error: 0.4157 - val_loss: 0.5523 - val_acc: 0.7624 - val_mean_absolute_error: 0.4072\nEpoch 10/50\n7268/7268 [==============================] - 1s 206us/sample - loss: 0.5545 - acc: 0.7434 - mean_absolute_error: 0.4041 - val_loss: 0.5334 - val_acc: 0.7760 - val_mean_absolute_error: 0.3946\nEpoch 11/50\n7268/7268 [==============================] - 2s 207us/sample - loss: 0.5386 - acc: 0.7584 - mean_absolute_error: 0.3942 - val_loss: 0.5325 - val_acc: 0.7116 - val_mean_absolute_error: 0.3771\nEpoch 12/50\n7268/7268 [==============================] - 1s 206us/sample - loss: 0.5238 - acc: 0.7661 - mean_absolute_error: 0.3839 - val_loss: 0.5208 - val_acc: 0.7203 - val_mean_absolute_error: 0.3662\nEpoch 13/50\n7268/7268 [==============================] - 2s 208us/sample - loss: 0.5107 - acc: 0.7816 - mean_absolute_error: 0.3733 - val_loss: 0.4874 - val_acc: 0.7822 - val_mean_absolute_error: 0.3588\nEpoch 14/50\n7268/7268 [==============================] - 1s 193us/sample - loss: 0.4929 - acc: 0.7922 - mean_absolute_error: 0.3627 - val_loss: 0.4764 - val_acc: 0.8391 - val_mean_absolute_error: 0.3600\nEpoch 15/50\n7268/7268 [==============================] - 1s 193us/sample - loss: 0.4871 - acc: 0.7914 - mean_absolute_error: 0.3553 - val_loss: 0.4534 - val_acc: 0.8391 - val_mean_absolute_error: 0.3418\nEpoch 16/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.4572 - acc: 0.8233 - mean_absolute_error: 0.3389 - val_loss: 0.4665 - val_acc: 0.8391 - val_mean_absolute_error: 0.3514\nEpoch 17/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.4678 - acc: 0.7999 - mean_absolute_error: 0.3383 - val_loss: 0.4502 - val_acc: 0.8428 - val_mean_absolute_error: 0.3387\nEpoch 18/50\n7268/7268 
[==============================] - 1s 194us/sample - loss: 0.4372 - acc: 0.8288 - mean_absolute_error: 0.3224 - val_loss: 0.4270 - val_acc: 0.8106 - val_mean_absolute_error: 0.3031\nEpoch 19/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.4214 - acc: 0.8381 - mean_absolute_error: 0.3109 - val_loss: 0.4091 - val_acc: 0.8218 - val_mean_absolute_error: 0.2948\nEpoch 20/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.4029 - acc: 0.8570 - mean_absolute_error: 0.2977 - val_loss: 0.4044 - val_acc: 0.8156 - val_mean_absolute_error: 0.2852\nEpoch 21/50\n7268/7268 [==============================] - 1s 198us/sample - loss: 0.3866 - acc: 0.8619 - mean_absolute_error: 0.2858 - val_loss: 0.3825 - val_acc: 0.8441 - val_mean_absolute_error: 0.2731\nEpoch 22/50\n7268/7268 [==============================] - 1s 193us/sample - loss: 0.3796 - acc: 0.8598 - mean_absolute_error: 0.2774 - val_loss: 0.3954 - val_acc: 0.8218 - val_mean_absolute_error: 0.2684\nEpoch 23/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.3635 - acc: 0.8704 - mean_absolute_error: 0.2664 - val_loss: 0.3732 - val_acc: 0.8688 - val_mean_absolute_error: 0.2776\nEpoch 24/50\n7268/7268 [==============================] - 1s 197us/sample - loss: 0.3496 - acc: 0.8764 - mean_absolute_error: 0.2574 - val_loss: 0.3558 - val_acc: 0.8577 - val_mean_absolute_error: 0.2499\nEpoch 25/50\n7268/7268 [==============================] - 1s 198us/sample - loss: 0.3405 - acc: 0.8841 - mean_absolute_error: 0.2490 - val_loss: 0.3830 - val_acc: 0.8465 - val_mean_absolute_error: 0.2786\nEpoch 26/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.3412 - acc: 0.8759 - mean_absolute_error: 0.2462 - val_loss: 0.3356 - val_acc: 0.8651 - val_mean_absolute_error: 0.2348\nEpoch 27/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.3292 - acc: 0.8879 - mean_absolute_error: 0.2385 - val_loss: 0.3344 - val_acc: 0.8577 - val_mean_absolute_error: 0.2304\nEpoch 28/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.3180 - acc: 0.8886 - mean_absolute_error: 0.2307 - val_loss: 0.3214 - val_acc: 0.8725 - val_mean_absolute_error: 0.2228\nEpoch 29/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.3268 - acc: 0.8795 - mean_absolute_error: 0.2304 - val_loss: 0.3044 - val_acc: 0.8837 - val_mean_absolute_error: 0.2171\nEpoch 30/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.3062 - acc: 0.8942 - mean_absolute_error: 0.2201 - val_loss: 0.2993 - val_acc: 0.8923 - val_mean_absolute_error: 0.2142\nEpoch 31/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.3119 - acc: 0.8855 - mean_absolute_error: 0.2210 - val_loss: 0.2968 - val_acc: 0.8973 - val_mean_absolute_error: 0.2128\nEpoch 32/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.3082 - acc: 0.8910 - mean_absolute_error: 0.2170 - val_loss: 0.2938 - val_acc: 0.8923 - val_mean_absolute_error: 0.2060\nEpoch 33/50\n7268/7268 [==============================] - 1s 199us/sample - loss: 0.2990 - acc: 0.8941 - mean_absolute_error: 0.2109 - val_loss: 0.2951 - val_acc: 0.8812 - val_mean_absolute_error: 0.2026\nEpoch 34/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.2861 - acc: 0.9029 - mean_absolute_error: 0.2039 - val_loss: 0.2862 - val_acc: 0.9035 - val_mean_absolute_error: 0.2027\nEpoch 35/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.2921 - 
acc: 0.8931 - mean_absolute_error: 0.2048 - val_loss: 0.3049 - val_acc: 0.8936 - val_mean_absolute_error: 0.2142\nEpoch 36/50\n7268/7268 [==============================] - 1s 193us/sample - loss: 0.2873 - acc: 0.8989 - mean_absolute_error: 0.2007 - val_loss: 0.2812 - val_acc: 0.8948 - val_mean_absolute_error: 0.1916\nEpoch 37/50\n7268/7268 [==============================] - 1s 197us/sample - loss: 0.2899 - acc: 0.8942 - mean_absolute_error: 0.2006 - val_loss: 0.3509 - val_acc: 0.8502 - val_mean_absolute_error: 0.2102\nEpoch 38/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.2929 - acc: 0.8950 - mean_absolute_error: 0.1997 - val_loss: 0.3219 - val_acc: 0.8651 - val_mean_absolute_error: 0.1986\nEpoch 39/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.2769 - acc: 0.9005 - mean_absolute_error: 0.1917 - val_loss: 0.2787 - val_acc: 0.9047 - val_mean_absolute_error: 0.1943\nEpoch 40/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.2744 - acc: 0.9052 - mean_absolute_error: 0.1887 - val_loss: 0.2949 - val_acc: 0.8800 - val_mean_absolute_error: 0.1883\nEpoch 41/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.2721 - acc: 0.9044 - mean_absolute_error: 0.1870 - val_loss: 0.2973 - val_acc: 0.8713 - val_mean_absolute_error: 0.1867\nEpoch 42/50\n7268/7268 [==============================] - 1s 196us/sample - loss: 0.2641 - acc: 0.9063 - mean_absolute_error: 0.1824 - val_loss: 0.2707 - val_acc: 0.8923 - val_mean_absolute_error: 0.1782\nEpoch 43/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.2579 - acc: 0.9141 - mean_absolute_error: 0.1776 - val_loss: 0.2584 - val_acc: 0.9121 - val_mean_absolute_error: 0.1771\nEpoch 44/50\n7268/7268 [==============================] - 1s 197us/sample - loss: 0.2636 - acc: 0.9048 - mean_absolute_error: 0.1794 - val_loss: 0.2688 - val_acc: 0.9022 - val_mean_absolute_error: 0.1834\nEpoch 45/50\n7268/7268 [==============================] - 1s 194us/sample - loss: 0.2537 - acc: 0.9155 - mean_absolute_error: 0.1742 - val_loss: 0.2748 - val_acc: 0.8874 - val_mean_absolute_error: 0.1745\nEpoch 46/50\n7268/7268 [==============================] - 1s 195us/sample - loss: 0.2521 - acc: 0.9147 - mean_absolute_error: 0.1722 - val_loss: 0.2661 - val_acc: 0.8948 - val_mean_absolute_error: 0.1707\n"
],
[
"plot_acc_and_loss(hist_DNN)",
"_____no_output_____"
],
[
"predictions = model_DNN.predict(X_val)\ny_val1 = np.argmax(y_val, axis=-1)\npredictions = np.argmax(predictions, axis=-1)\nc = confusion_matrix(y_val1, predictions)\nprint('Confusion matrix:\\n', c)\nprint('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))\nprint('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))",
"Confusion matrix:\n [[448 20]\n [ 65 275]]\nsensitivity 0.9572649572649573\nspecificity 0.8088235294117647\n"
]
],
[
[
"## ResNet",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.applications import ResNet50\n\ndef resnet():\n input_tensor = tf.keras.layers.Input(shape=(100, 100, 3))\n model = ResNet50(include_top=True, weights=None, input_tensor=input_tensor, input_shape=None, pooling=None, classes=2)\n param = model.count_params()\n model.compile(optimizer=tf.train.AdamOptimizer(0.00001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n return model, param",
"_____no_output_____"
],
[
"model_resnet, param_resnet = resnet()\nloss_resnet, val_loss_resnet, hist_resnet= train_data(model_resnet)",
"__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) (None, 100, 100, 3) 0 \n__________________________________________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 106, 106, 3) 0 input_1[0][0] \n__________________________________________________________________________________________________\nconv1 (Conv2D) (None, 50, 50, 64) 9472 conv1_pad[0][0] \n__________________________________________________________________________________________________\nbn_conv1 (BatchNormalizationV1) (None, 50, 50, 64) 256 conv1[0][0] \n__________________________________________________________________________________________________\nactivation (Activation) (None, 50, 50, 64) 0 bn_conv1[0][0] \n__________________________________________________________________________________________________\npool1_pad (ZeroPadding2D) (None, 52, 52, 64) 0 activation[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_20 (MaxPooling2D) (None, 25, 25, 64) 0 pool1_pad[0][0] \n__________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 25, 25, 64) 4160 max_pooling2d_20[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizati (None, 25, 25, 64) 256 res2a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 25, 25, 64) 0 bn2a_branch2a[0][0] \n__________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 25, 25, 64) 36928 activation_1[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizati (None, 25, 25, 64) 256 res2a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 25, 25, 64) 0 bn2a_branch2b[0][0] \n__________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 25, 25, 256) 16640 activation_2[0][0] \n__________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 25, 25, 256) 16640 max_pooling2d_20[0][0] \n__________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizati (None, 25, 25, 256) 1024 res2a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalizatio (None, 25, 25, 256) 1024 res2a_branch1[0][0] \n__________________________________________________________________________________________________\nadd (Add) (None, 25, 25, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_3 (Activation) (None, 25, 25, 256) 0 add[0][0] \n__________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 25, 25, 64) 16448 activation_3[0][0] 
\n__________________________________________________________________________________________________\nbn2b_branch2a (BatchNormalizati (None, 25, 25, 64) 256 res2b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_4 (Activation) (None, 25, 25, 64) 0 bn2b_branch2a[0][0] \n__________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 25, 25, 64) 36928 activation_4[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizati (None, 25, 25, 64) 256 res2b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_5 (Activation) (None, 25, 25, 64) 0 bn2b_branch2b[0][0] \n__________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 25, 25, 256) 16640 activation_5[0][0] \n__________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizati (None, 25, 25, 256) 1024 res2b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_1 (Add) (None, 25, 25, 256) 0 bn2b_branch2c[0][0] \n activation_3[0][0] \n__________________________________________________________________________________________________\nactivation_6 (Activation) (None, 25, 25, 256) 0 add_1[0][0] \n__________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 25, 25, 64) 16448 activation_6[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizati (None, 25, 25, 64) 256 res2c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 25, 25, 64) 0 bn2c_branch2a[0][0] \n__________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 25, 25, 64) 36928 activation_7[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizati (None, 25, 25, 64) 256 res2c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_8 (Activation) (None, 25, 25, 64) 0 bn2c_branch2b[0][0] \n__________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 25, 25, 256) 16640 activation_8[0][0] \n__________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizati (None, 25, 25, 256) 1024 res2c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_2 (Add) (None, 25, 25, 256) 0 bn2c_branch2c[0][0] \n activation_6[0][0] \n__________________________________________________________________________________________________\nactivation_9 (Activation) (None, 25, 25, 256) 0 add_2[0][0] \n__________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 13, 13, 128) 32896 activation_9[0][0] 
\n__________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizati (None, 13, 13, 128) 512 res3a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_10 (Activation) (None, 13, 13, 128) 0 bn3a_branch2a[0][0] \n__________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 13, 13, 128) 147584 activation_10[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizati (None, 13, 13, 128) 512 res3a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_11 (Activation) (None, 13, 13, 128) 0 bn3a_branch2b[0][0] \n__________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 13, 13, 512) 66048 activation_11[0][0] \n__________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 13, 13, 512) 131584 activation_9[0][0] \n__________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizati (None, 13, 13, 512) 2048 res3a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalizatio (None, 13, 13, 512) 2048 res3a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_3 (Add) (None, 13, 13, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_12 (Activation) (None, 13, 13, 512) 0 add_3[0][0] \n__________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 13, 13, 128) 65664 activation_12[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizati (None, 13, 13, 128) 512 res3b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_13 (Activation) (None, 13, 13, 128) 0 bn3b_branch2a[0][0] \n__________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 13, 13, 128) 147584 activation_13[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizati (None, 13, 13, 128) 512 res3b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_14 (Activation) (None, 13, 13, 128) 0 bn3b_branch2b[0][0] \n__________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 13, 13, 512) 66048 activation_14[0][0] \n__________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizati (None, 13, 13, 512) 2048 res3b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_4 (Add) (None, 13, 13, 512) 0 bn3b_branch2c[0][0] \n activation_12[0][0] 
\n__________________________________________________________________________________________________\nactivation_15 (Activation) (None, 13, 13, 512) 0 add_4[0][0] \n__________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 13, 13, 128) 65664 activation_15[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizati (None, 13, 13, 128) 512 res3c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_16 (Activation) (None, 13, 13, 128) 0 bn3c_branch2a[0][0] \n__________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 13, 13, 128) 147584 activation_16[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizati (None, 13, 13, 128) 512 res3c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 13, 13, 128) 0 bn3c_branch2b[0][0] \n__________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 13, 13, 512) 66048 activation_17[0][0] \n__________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizati (None, 13, 13, 512) 2048 res3c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_5 (Add) (None, 13, 13, 512) 0 bn3c_branch2c[0][0] \n activation_15[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 13, 13, 512) 0 add_5[0][0] \n__________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 13, 13, 128) 65664 activation_18[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizati (None, 13, 13, 128) 512 res3d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 13, 13, 128) 0 bn3d_branch2a[0][0] \n__________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 13, 13, 128) 147584 activation_19[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizati (None, 13, 13, 128) 512 res3d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 13, 13, 128) 0 bn3d_branch2b[0][0] \n__________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 13, 13, 512) 66048 activation_20[0][0] \n__________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizati (None, 13, 13, 512) 2048 res3d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_6 (Add) (None, 13, 13, 512) 0 bn3d_branch2c[0][0] \n activation_18[0][0] 
\n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 13, 13, 512) 0 add_6[0][0] \n__________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 7, 7, 256) 131328 activation_21[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 7, 7, 256) 0 bn4a_branch2a[0][0] \n__________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_22[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 7, 7, 256) 0 bn4a_branch2b[0][0] \n__________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_23[0][0] \n__________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 7, 7, 1024) 525312 activation_21[0][0] \n__________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalizatio (None, 7, 7, 1024) 4096 res4a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_7 (Add) (None, 7, 7, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 7, 7, 1024) 0 add_7[0][0] \n__________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 7, 7, 256) 262400 activation_24[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 7, 7, 256) 0 bn4b_branch2a[0][0] \n__________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_25[0][0] \n__________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4b_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 7, 7, 256) 0 bn4b_branch2b[0][0] \n__________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_26[0][0] 
\n__________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_8 (Add) (None, 7, 7, 1024) 0 bn4b_branch2c[0][0] \n activation_24[0][0] \n__________________________________________________________________________________________________\nactivation_27 (Activation) (None, 7, 7, 1024) 0 add_8[0][0] \n__________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 7, 7, 256) 262400 activation_27[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_28 (Activation) (None, 7, 7, 256) 0 bn4c_branch2a[0][0] \n__________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_28[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_29 (Activation) (None, 7, 7, 256) 0 bn4c_branch2b[0][0] \n__________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_29[0][0] \n__________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_9 (Add) (None, 7, 7, 1024) 0 bn4c_branch2c[0][0] \n activation_27[0][0] \n__________________________________________________________________________________________________\nactivation_30 (Activation) (None, 7, 7, 1024) 0 add_9[0][0] \n__________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 7, 7, 256) 262400 activation_30[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4d_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_31 (Activation) (None, 7, 7, 256) 0 bn4d_branch2a[0][0] \n__________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_31[0][0] \n__________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4d_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_32 (Activation) (None, 7, 7, 256) 0 bn4d_branch2b[0][0] \n__________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_32[0][0] 
\n__________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4d_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_10 (Add) (None, 7, 7, 1024) 0 bn4d_branch2c[0][0] \n activation_30[0][0] \n__________________________________________________________________________________________________\nactivation_33 (Activation) (None, 7, 7, 1024) 0 add_10[0][0] \n__________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 7, 7, 256) 262400 activation_33[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4e_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_34 (Activation) (None, 7, 7, 256) 0 bn4e_branch2a[0][0] \n__________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_34[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4e_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_35 (Activation) (None, 7, 7, 256) 0 bn4e_branch2b[0][0] \n__________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_35[0][0] \n__________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4e_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_11 (Add) (None, 7, 7, 1024) 0 bn4e_branch2c[0][0] \n activation_33[0][0] \n__________________________________________________________________________________________________\nactivation_36 (Activation) (None, 7, 7, 1024) 0 add_11[0][0] \n__________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 7, 7, 256) 262400 activation_36[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizati (None, 7, 7, 256) 1024 res4f_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_37 (Activation) (None, 7, 7, 256) 0 bn4f_branch2a[0][0] \n__________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 7, 7, 256) 590080 activation_37[0][0] \n__________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizati (None, 7, 7, 256) 1024 res4f_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_38 (Activation) (None, 7, 7, 256) 0 bn4f_branch2b[0][0] \n__________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 7, 7, 1024) 263168 activation_38[0][0] 
\n__________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizati (None, 7, 7, 1024) 4096 res4f_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_12 (Add) (None, 7, 7, 1024) 0 bn4f_branch2c[0][0] \n activation_36[0][0] \n__________________________________________________________________________________________________\nactivation_39 (Activation) (None, 7, 7, 1024) 0 add_12[0][0] \n__________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 4, 4, 512) 524800 activation_39[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizati (None, 4, 4, 512) 2048 res5a_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_40 (Activation) (None, 4, 4, 512) 0 bn5a_branch2a[0][0] \n__________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 4, 4, 512) 2359808 activation_40[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizati (None, 4, 4, 512) 2048 res5a_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_41 (Activation) (None, 4, 4, 512) 0 bn5a_branch2b[0][0] \n__________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 4, 4, 2048) 1050624 activation_41[0][0] \n__________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 4, 4, 2048) 2099200 activation_39[0][0] \n__________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizati (None, 4, 4, 2048) 8192 res5a_branch2c[0][0] \n__________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalizatio (None, 4, 4, 2048) 8192 res5a_branch1[0][0] \n__________________________________________________________________________________________________\nadd_13 (Add) (None, 4, 4, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n__________________________________________________________________________________________________\nactivation_42 (Activation) (None, 4, 4, 2048) 0 add_13[0][0] \n__________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 4, 4, 512) 1049088 activation_42[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizati (None, 4, 4, 512) 2048 res5b_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_43 (Activation) (None, 4, 4, 512) 0 bn5b_branch2a[0][0] \n__________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 4, 4, 512) 2359808 activation_43[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizati (None, 4, 4, 512) 2048 res5b_branch2b[0][0] 
\n__________________________________________________________________________________________________\nactivation_44 (Activation) (None, 4, 4, 512) 0 bn5b_branch2b[0][0] \n__________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 4, 4, 2048) 1050624 activation_44[0][0] \n__________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizati (None, 4, 4, 2048) 8192 res5b_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_14 (Add) (None, 4, 4, 2048) 0 bn5b_branch2c[0][0] \n activation_42[0][0] \n__________________________________________________________________________________________________\nactivation_45 (Activation) (None, 4, 4, 2048) 0 add_14[0][0] \n__________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 4, 4, 512) 1049088 activation_45[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizati (None, 4, 4, 512) 2048 res5c_branch2a[0][0] \n__________________________________________________________________________________________________\nactivation_46 (Activation) (None, 4, 4, 512) 0 bn5c_branch2a[0][0] \n__________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 4, 4, 512) 2359808 activation_46[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizati (None, 4, 4, 512) 2048 res5c_branch2b[0][0] \n__________________________________________________________________________________________________\nactivation_47 (Activation) (None, 4, 4, 512) 0 bn5c_branch2b[0][0] \n__________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 4, 4, 2048) 1050624 activation_47[0][0] \n__________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizati (None, 4, 4, 2048) 8192 res5c_branch2c[0][0] \n__________________________________________________________________________________________________\nadd_15 (Add) (None, 4, 4, 2048) 0 bn5c_branch2c[0][0] \n activation_45[0][0] \n__________________________________________________________________________________________________\nactivation_48 (Activation) (None, 4, 4, 2048) 0 add_15[0][0] \n__________________________________________________________________________________________________\navg_pool (GlobalAveragePooling2 (None, 2048) 0 activation_48[0][0] \n__________________________________________________________________________________________________\nfc1000 (Dense) (None, 2) 4098 avg_pool[0][0] \n==================================================================================================\nTotal params: 23,591,810\nTrainable params: 23,538,690\nNon-trainable params: 53,120\n__________________________________________________________________________________________________\nTrain on 7268 samples, validate on 808 samples\n"
],
[
"plot_acc_and_loss(hist_resnet)",
"_____no_output_____"
],
[
"predictions = model_resnet.predict(X_val)\ny_val1 = np.argmax(y_val, axis=-1)\npredictions = np.argmax(predictions, axis=-1)\nc = confusion_matrix(y_val1, predictions)\nprint('Confusion matrix:\\n', c)\nprint('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))\nprint('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))",
"Confusion matrix:\n [[404 64]\n [ 45 295]]\nsensitivity 0.8632478632478633\nspecificity 0.8676470588235294\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
d0d615144a702870967e71be8aa1dd5e577c26e7 | 39,333 | ipynb | Jupyter Notebook | notebooks/vgg16_cifar100_fastai.ipynb | codestar12/pruning-distilation-bias | 3e0c199ef9bcc4809175365744878da8fb322ba9 | [
"BSD-2-Clause"
] | 1 | 2021-07-11T07:29:53.000Z | 2021-07-11T07:29:53.000Z | notebooks/vgg16_cifar100_fastai.ipynb | codestar12/pruning-distilation-bias | 3e0c199ef9bcc4809175365744878da8fb322ba9 | [
"BSD-2-Clause"
] | 1 | 2021-07-11T07:28:35.000Z | 2021-07-13T15:36:04.000Z | notebooks/vgg16_cifar100_fastai.ipynb | codestar12/pruning-distilation-bias | 3e0c199ef9bcc4809175365744878da8fb322ba9 | [
"BSD-2-Clause"
] | null | null | null | 49.978399 | 15,224 | 0.609615 | [
[
[
"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"",
"_____no_output_____"
],
[
"from fastai.vision.all import *\nfrom fastai.distributed import *\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\nimport torchvision.transforms as transforms",
"_____no_output_____"
],
[
"cifar_stats",
"_____no_output_____"
],
[
"transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),\n])\n\ntrainset = torchvision.datasets.CIFAR100(\n root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=256, shuffle=True, num_workers=8)\n\ntestset = torchvision.datasets.CIFAR100(\n root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=256, shuffle=False, num_workers=8)\n\ndls = DataLoaders(trainloader, testloader)",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
],
[
"model = models.vgg16_bn(pretrained=False)",
"_____no_output_____"
],
[
"model.classifier[-1] = nn.Linear(in_features=4096, out_features=100)\nmodel = model.cuda()",
"_____no_output_____"
],
[
"learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy, cbs=[CudaCallback, SaveModelCallback(monitor='accuracy')])\nlearn.model = learn.model.cuda()",
"_____no_output_____"
],
[
"learn.lr_find()",
"_____no_output_____"
],
[
"learn.fit_one_cycle(240, .001798)",
"_____no_output_____"
],
[
"learn.validate()",
"_____no_output_____"
],
[
"path = '../models/baseline/cifar100/'\n\ntry:\n os.mkdir(path)\nexcept OSError as error:\n print(error)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
d0d6188ba7db47518f9b0c66d8d94afbe6689a1f | 313,986 | ipynb | Jupyter Notebook | Jupyter Notebook Examples/Python/HPDS/4) BMI-Age Plot by Gender.ipynb | hms-dbmi/PIC-SURE-2-Jupyter-Examples | f45746738d3b120f758e3e859538bd45fe50fa7b | [
"Apache-2.0"
] | null | null | null | Jupyter Notebook Examples/Python/HPDS/4) BMI-Age Plot by Gender.ipynb | hms-dbmi/PIC-SURE-2-Jupyter-Examples | f45746738d3b120f758e3e859538bd45fe50fa7b | [
"Apache-2.0"
] | null | null | null | Jupyter Notebook Examples/Python/HPDS/4) BMI-Age Plot by Gender.ipynb | hms-dbmi/PIC-SURE-2-Jupyter-Examples | f45746738d3b120f758e3e859538bd45fe50fa7b | [
"Apache-2.0"
] | 1 | 2019-12-04T16:27:38.000Z | 2019-12-04T16:27:38.000Z | 521.571429 | 127,960 | 0.935246 | [
[
[
"## Import the needed libraries",
"_____no_output_____"
]
],
[
[
"import PicSureHpdsLib\nimport pandas\nimport matplotlib",
"_____no_output_____"
]
],
[
[
"## Create an instance of the datasource adapter and get a reference to the data resource ",
"_____no_output_____"
]
],
[
[
"adapter = PicSureHpdsLib.BypassAdapter(\"http://pic-sure-hpds-nhanes:8080/PIC-SURE\")\nresource = adapter.useResource()",
"_____no_output_____"
]
],
[
[
"## Get a listing of all \"demographics\" entries in the data dictionary. Show what actions can be done with the \"demographic_results\" object",
"_____no_output_____"
]
],
[
[
"demographic_entries = resource.dictionary().find(\"\\\\demographics\\\\\")\ndemographic_entries.help()",
"\n [HELP] PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find(term)\n .count() Returns the number of entries in the dictionary that match the given term\n .keys() Return the keys of the matching entries\n .entries() Return a list of matching dictionary entries\n .DataFrame() Return the entries in a Pandas-compatible format\n \n [Examples]\n results = PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find(\"asthma\")\n df = results.DataFrame()\n \n"
]
],
[
[
"## Examine the demographic_entries results by converting it into a pandas DataFrame",
"_____no_output_____"
]
],
[
[
"demographic_entries.DataFrame()",
"_____no_output_____"
],
[
"resource.query().help()",
"\n .select() list of data fields to return from resource for each record\n .require() list of data fields that must be present in all returned records\n .filter() list of data fields and conditions that returned records satisfy\n [ Filter keys exert an AND relationship on returned records ]\n [ Categorical values have an OR relationship on their key ]\n [ Numerical Ranges are inclusive of their start and end points ]\n\n .getCount() returns a count indicating the number of matching numbers\n .getResults() returns a CSV-like string containing the matching records\n .getResultsDataFrame() returns a pandas DataFrame containing the matching records\n .getRunDetails() returns details about the last run of the query\n .getQueryCommand() returns the JSON-formatted query request\n .show() lists all current query parameters\n \n * getCount(), getResults(), and getResultsDataFrame() functions can also \n accept options that run queries differently which might help with \n connection timeouts. Example: .getResults(async=True, timeout=60)\n \n"
],
[
"resource.query().filter().help()",
"\n filter().\n add(\"key\", value) - or -\n add(\"key\", \"value\") filter to records with KEY column that equals VALUE\n add(\"key\", [\"value1\", \"value2\"]) filter to records with KEY column equalling one value within the given list\n add(\"key\", start, end) filter to records with KEY column value between START and END (inclusive)\n start -or- end may be set to None to filter by a max or min value\n delete(\"key\") delete a filter from the list of filters\n show() lists all current filters that results records must satisfy\n clear() clears all values from the filters list\n \n"
],
[
"query_male = resource.query()\nquery_male.filter().add(\"\\\\demographics\\\\SEX\\\\\", [\"male\"])\n\nquery_female = resource.query()\nquery_female.filter().add(\"\\\\demographics\\\\SEX\\\\\", [\"female\"])",
"_____no_output_____"
],
[
"field_age = resource.dictionary().find(\"\\\\AGE\\\\\")\nfield_BMI = resource.dictionary().find(\"\\\\Body Mass Index\")\n\nquery_male.require().add(field_age.keys())\nquery_male.require().add(field_BMI.keys())\nquery_female.require().add(field_age.keys())\nquery_female.require().add(field_BMI.keys())\n\nquery_female.show()",
".__________[ Query.Select() has NO SELECTIONS ]____________________________________________________________________________________________________________\n.__________[ Query.Require() Settings ]_________________________________________________________________________________________\n| _key__________________________________________________________________________________________________________________________\n| \\\\demographics\\\\AGE\\\\ |\n| \\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\ |\n.__________[ Query.Filter() Settings ]_____________________________________________________________________________________________________________________\n| _restriction_type_ | _key__________________________________________________________________________________________________________ | _restriction_values_\n| categorical | \\\\demographics\\\\SEX\\\\ | ['female'] |\n"
]
],
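[
    [
      "The help text above also documents numeric range filters via `add(\"key\", start, end)`, inclusive of both endpoints. A small, hedged illustration (added here; the AGE key mirrors the one shown in the query output above):",
      "_____no_output_____"
    ]
  ],
  [
    [
      "# Hypothetical example: restrict a query to ages 20 through 40 inclusive.\n# Per the help text, start or end may be None for an open-ended bound.\nquery_age = resource.query()\nquery_age.filter().add(\"\\\\demographics\\\\AGE\\\\\", 20, 40)\nquery_age.show()",
      "_____no_output_____"
    ]
  ],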
[
[
"## Convert the query results for females into a DataFrame and plot it by BMI and Age",
"_____no_output_____"
]
],
[
[
"df_f = query_female.getResultsDataFrame()\nplot_f = df_f.plot.scatter(x=\"\\\\demographics\\\\AGE\\\\\", y=\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\", c=\"#ffbabb40\")\n\n# ____ Uncomment if graphs are not displaying ____\n#plot_f.plot()\n#matplotlib.pyplot.show()",
"_____no_output_____"
]
],
[
[
"## Convert the query results for males into a DataFrame and plot it by BMI and Age",
"_____no_output_____"
]
],
[
[
"df_m = query_male.getResultsDataFrame()\nplot_m = df_m.plot.scatter(x=\"\\\\demographics\\\\AGE\\\\\", y=\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\", c=\"#5a7dd040\")\n\n# ____ Uncomment if graphs are not displaying ____\n#plot_m.plot()\n#matplotlib.pyplot.show()",
"_____no_output_____"
]
],
[
[
"## Replot the results using a single DataFrame containing both male and female",
"_____no_output_____"
]
],
[
[
"d = resource.dictionary()\ncriteria = []\ncriteria.extend(d.find(\"\\\\SEX\\\\\").keys())\ncriteria.extend(d.find(\"\\\\Body Mass Index\").keys())\ncriteria.extend(d.find(\"\\\\AGE\\\\\").keys())\n\nquery_unified = resource.query()\nquery_unified.require().add(criteria)\ndf_mf = query_unified.getResultsDataFrame()\n\n# map a color field for the plot to use\nsex_colors = {'male':'#5a7dd040', 'female':'#ffbabb40'}\ndf_mf['\\\\sex_color\\\\'] = df_mf['\\\\demographics\\\\SEX\\\\'].map(sex_colors)\n\n\n# plot data\nplot_mf = df_mf.plot.scatter(x=\"\\\\demographics\\\\AGE\\\\\", y=\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\", c=df_mf['\\\\sex_color\\\\'])\n\n# ____ Uncomment if graphs are not displaying ____\n#plot_mf.plot()\n#matplotlib.pyplot.show()",
"_____no_output_____"
]
],
[
[
"## Replot data but trim outliers",
"_____no_output_____"
]
],
[
[
"q = df_mf[\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\"].quantile(0.9999)\n\n# create a masked array to remove outliers\ntest = df_mf.mask(df_mf[\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\"] > q)\n\n# plot data\nplot_mf = test.plot.scatter(x=\"\\\\demographics\\\\AGE\\\\\", y=\"\\\\examination\\\\body measures\\\\Body Mass Index (kg per m**2)\\\\\", c=df_mf['\\\\sex_color\\\\'])\n\n# ____ Uncomment if graphs are not displaying ____\n#plot_mf.plot()\n#matplotlib.pyplot.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d61e438335b25f1cbdfe47f549fbeb296714a4 | 155,757 | ipynb | Jupyter Notebook | Advanced/2. Pension Planning using HJB eqn.ipynb | coorung/Finance | 983d05541d8f85529f6ccf802b27d73c9eeecf53 | [
"MIT"
] | 41 | 2019-12-22T21:56:14.000Z | 2022-02-21T03:59:56.000Z | Advanced/2. Pension Planning using HJB eqn.ipynb | coorung/Finance | 983d05541d8f85529f6ccf802b27d73c9eeecf53 | [
"MIT"
] | null | null | null | Advanced/2. Pension Planning using HJB eqn.ipynb | coorung/Finance | 983d05541d8f85529f6ccf802b27d73c9eeecf53 | [
"MIT"
] | 5 | 2020-04-04T06:29:43.000Z | 2022-01-27T03:42:21.000Z | 382.695332 | 91,784 | 0.929493 | [
[
[
"## Appendix (Application of the mutual fund theorem)",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport FinanceDataReader as fdr\nimport pandas as pd\n\n\nticker_list = ['069500']\ndf_list = [fdr.DataReader(ticker, '2015-01-01', '2016-12-31')['Change'] for ticker in ticker_list]\n\ndf = pd.concat(df_list, axis=1)\n#df.columns = ['005930', '000660', '005935', '035420', '005380', '207940', '012330', '068270', '051910', '055550', '069500']\ndf.columns = ['KODEX200']\n\nr = df.dropna()\nrf = 0.0125\n\n\n#df = df.resample('Y').agg(lambda x:x.mean()*252)\n\n# Calculate basic summary statistics for individual stocks\nstock_volatility = r.std() * np.sqrt(252)\nstock_return = r.mean() * 252\n\nalpha = stock_return.values\nsigma = stock_volatility.values",
"_____no_output_____"
],
[
"# cov_inv = np.linalg.inv(cov)\n# temp = np.dot(cov_inv, (stock_return- rf))\n# theta_opt = temp / temp.sum() # optimal weight in Risky Mutual fund\n\n# alpha = np.dot(theta_opt, stock_return) # 0.5941\n# sigma = np.sqrt(cov.dot(theta_opt).dot(theta_opt))",
"_____no_output_____"
]
],
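[
    [
      "The commented-out block above sketches the mutual fund theorem for several risky assets. As a hedged, runnable illustration (added here; it reuses `r`, `stock_return`, and `rf` defined above), the next cell computes tangency-portfolio weights proportional to `inv(Cov) @ (mean - rf)`. With the single KODEX200 series the weight is trivially 1.",
      "_____no_output_____"
    ]
  ],
  [
    [
      "# Hedged sketch of the mutual fund theorem (illustration only; not part of the original notebook)\ncov = r.cov() * 252  # annualized covariance of risky daily returns\ncov_inv = np.linalg.inv(cov)\nexcess = stock_return.values - rf  # expected excess returns over the risk-free rate\nw = cov_inv @ excess\ntheta_opt = w / w.sum()  # weights of the risky mutual fund (sum to 1)\nprint('tangency weights:', theta_opt)\nprint('fund return:', theta_opt @ stock_return.values)\nprint('fund volatility:', np.sqrt(theta_opt @ cov.values @ theta_opt))",
      "_____no_output_____"
    ]
  ],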
[
[
"## (5B), (7B)",
"_____no_output_____"
]
],
[
[
"# g_B = 0 # in case of age over retirement (Second scenario in Problem(B))\nX0 = 150. # Saving account at the beginning\nl = 3\nt = 45 # age in case of age over retirement (Second scenario in Problem(B))\ngamma = -3. # risk averse measure\n\nphi = rf + (alpha -rf)**2 / (2 * sigma**2 * (1-gamma)) # temporal function for f_B\nrho = 0.04 # impatience factor for utility function\nbeta = 4.59364 # parameter for mu\ndelta = 0.05032 # parameter for mu\nrf=0.02\n\ndef f_B(t):\n \n if t < 65:\n ds = 0.01\n T = 65\n T_tilde = 110\n value = 0\n \n for s in np.arange(T, T_tilde, ds):\n w_s = np.exp(-rho*s/(1-gamma))\n tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))\n value += np.exp(-1/(1-gamma)*(tmp - gamma*tmp - gamma*phi *(s-t))) * w_s * ds\n \n f = np.exp(-1/(1-gamma) *(tmp - gamma*tmp + gamma*phi*(T-t))) * value\n \n return f\n \n else: # 65~\n ds = 0.01\n T_tilde = 110\n value = 0\n\n for s in np.arange(t, T_tilde, ds):\n w_s = np.exp(-rho*s/(1-gamma))\n tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))\n value += np.exp(-1/(1-gamma)*(tmp - gamma*tmp - gamma*phi *(s-t))) * w_s * ds\n \n return value\n\n\n# def f_B(t):\n# ds = 0.01\n# T_tilde = 110\n# value = 0\n \n# for s in np.arange(t, T_tilde, ds):\n# w_s = np.exp(-rho*s/(1-gamma))\n# tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))\n# value += np.exp(- tmp + gamma/(1-gamma) * phi *(s-t)) * w_s * ds\n# return value\n\n# def V_B(t, x):\n# f_b = f_B(t)\n# value_fcn = 1/gamma * f_b **(1-gamma) * x **gamma\n# return value_fcn\n\ndef C_star(t,X):\n w_t = np.exp(-rho*t/(1-gamma))\n f_b = f_B(t)\n c_t = w_t/f_b * X\n return c_t\n\ndef g_B(t, l):\n \n ds=0.01\n value = 0.\n T=65 # retirement\n \n if t < T:\n for s in np.arange(t, T, ds):\n tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))\n value += np.exp(-tmp)*l * ds\n return value\n \n else:\n return 0.\n\n\npi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (X0 + g_B(t, l))/X0 # Optimal weight for Risky Asset (7B)\n\nprint(pi_opt) # 0.25\n# print(C_star(t, X))",
"[0.83630489]\n"
]
],
[
[
"## Simulation",
"_____no_output_____"
]
],
[
[
"import time\n\nstart = time.time()\n\ndt = 1\n\ndef mu(t): # Mortality rate in next year\n value = (10**(beta + delta*(t+dt) - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))\n return value\n\nn_simulation = 10000\n\nAsset = np.empty(37)\nAsset_stack = []\nC_stack = []\n\nfor i in range(n_simulation):\n Asset[0] = 150 # initial wealth\n C_list = []\n \n for t in range(45, 81):\n \n if t < 65: # before retirement\n \n l_t = 3 # payment to pension fund\n pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset[t-45] + g_B(t, l_t))/Asset[t-45]\n C_t = 0 # \n Z = np.random.randn()\n\n Asset[t-45+1] = Asset[t-45]*np.exp(((1-pi_opt)*rf + pi_opt*alpha + mu(t)+ l_t/Asset[t-45] \\\n -pi_opt**2 * sigma**2/2)*dt + pi_opt * sigma * np.sqrt(dt) * Z)\n \n else : # after retirement\n \n l_t = 0 # payment duty is 0 after retirement\n pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset[t-45] + g_B(t, l_t))/Asset[t-45]\n C_t = C_star(t=t, X = Asset[t-45])\n Z = np.random.randn()\n\n Asset[t-45+1] = Asset[t-45]*np.exp(((1-pi_opt)*rf + pi_opt*alpha + mu(t)- C_t/Asset[t-45] \\\n -pi_opt**2 * sigma**2/2)*dt + pi_opt * sigma * np.sqrt(dt) * Z)\n C_list.append(C_t)\n\n\n Asset_stack.append(list(Asset))\n C_stack.append(C_list)\n\nend = time.time()\nprint(end - start)",
"12911.317305803299\n"
]
],
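[
    [
      "The simulation above took about 3.6 hours, largely because `f_B(t)` redoes its numerical integration for every path and every year. A hedged optimization sketch (added here; it only assumes `f_B` depends on age, not on the simulated path): tabulate `f_B` once per integer age and look it up inside the loop.",
      "_____no_output_____"
    ]
  ],
  [
    [
      "# Precompute f_B(t) once per age; the Monte Carlo loop can then call C_star_fast instead of C_star\nf_B_cache = {t: f_B(t) for t in range(45, 82)}\n\ndef C_star_fast(t, X):\n    w_t = np.exp(-rho*t/(1-gamma))\n    return w_t / f_B_cache[t] * X",
      "_____no_output_____"
    ]
  ],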
[
[
"## Check the Simulation Result",
"_____no_output_____"
]
],
[
[
"Asset_mean = np.mean(Asset_stack, axis=0) #(37,)\nC_mean = np.mean(C_stack, axis=0) # (16,1)\n\nplt.rcParams['figure.figsize'] = [30, 15]\nplt.rcParams.update({'font.size': 30})\n\nplt.title('Retirement planning')\nplt.xlabel('Age')\nplt.ylabel('Won(1000000)')\nplt.plot(range(45,81),Asset_mean[:-1], label='Wealth')\nplt.plot(range(65,81),C_mean, '--', color = 'r', label=\"Pension\")\nplt.legend()\nplt.grid()",
"_____no_output_____"
],
[
"pi_opt_list=[]\nfor t in range(45, 81):\n if t < 65:\n l_t = 3\n else :\n l_t = 0\n pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset_mean[:-1][t-45] + g_B(t, l_t))/Asset_mean[:-1][t-45]\n pi_opt_list.append(pi_opt)",
"_____no_output_____"
],
[
"plt.title('Optimal weight of risky-asset changing following ages')\nplt.xlabel('Age')\nplt.ylabel('Weight')\nplt.bar(range(45,81),np.array(pi_opt_list).squeeze())",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
d0d6210228316563c5dbb76de2bf01a787083582 | 30,265 | ipynb | Jupyter Notebook | Pandas to Spark.ipynb | nancunpei/spark-examples | 0c11cf38317be94c78ad2edaa5a2ecefc9571134 | [
"Apache-2.0"
] | 2 | 2020-07-29T01:52:44.000Z | 2020-07-29T09:55:09.000Z | Pandas to Spark.ipynb | nancunpei/spark-examples | 0c11cf38317be94c78ad2edaa5a2ecefc9571134 | [
"Apache-2.0"
] | null | null | null | Pandas to Spark.ipynb | nancunpei/spark-examples | 0c11cf38317be94c78ad2edaa5a2ecefc9571134 | [
"Apache-2.0"
] | 1 | 2020-07-27T07:38:49.000Z | 2020-07-27T07:38:49.000Z | 25.095357 | 140 | 0.362828 | [
[
[
"## 前言\n本文主要讨论如何把pandas移植到spark, 他们的dataframe共有一些特性如操作方法和模式。pandas的灵活性比spark强, 但是经过一些改动spark基本上能完成相同的工作。\n同时又兼具了扩展性的优势,当然他们的语法和用法稍稍有些不同。\n\n## 主要不同处:\n\n### 分布式处理\npandas只能单机处理, 把dataframe放进内存计算。spark是集群分布式地,可以处理的数据可以大大超出集群的内存数。\n\n### 懒执行\nspark不执行任何`transformation`直到需要运行`action`方法,`action`一般是存储或者展示数据的操作。这种将`transformation`延后的做法可以让spark调度知道所有的执行情况,用于优化执行顺序和读取需要的数据。\n懒执行也是scala的特性之一。通常,在pandas我们总是和数据打交道, 而在spark,我们总是在改变产生数据的执行计划。\n\n### 数据不可变\nscala的函数式编程通常倾向使用不可变对象, 每一个spark transformation会返回一个新的dataframe(除了一些meta info会改变)\n\n### 没有索引\nspark是没有索引概念的.\n\n### 单条数据索引不方便\npandas可以快速使用索引找到数据,spark没有这个功能,因为在spark主要操作的是执行计划来展示数据, 而不是数据本身。\n\n### spark sql\n因为有了SQL功能的支持, spark更接近关系型数据库。",
"_____no_output_____"
],
[
"## pandas和pyspark使用的一些例子",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport pyspark.sql\nimport pyspark.sql.functions as sf\nfrom pyspark.sql import SparkSession",
"_____no_output_____"
]
],
[
[
"### Projections\npandas的投影可以直接通过[]操作",
"_____no_output_____"
]
],
[
[
"person_pd = pd.read_csv('data/persons.csv')\nperson_pd[[\"name\", \"sex\", \"age\"]]",
"_____no_output_____"
]
],
[
[
"pyspark也可以直接`[]`来选取投影, 但是这是一个语法糖, 实际是用了`select`方法",
"_____no_output_____"
]
],
[
[
"spark = SparkSession.builder \\\n .master(\"local[*]\") \\\n .config(\"spark.driver.memory\",\"6G\") \\\n .getOrCreate()\n#person_pd[['age','name']]\n",
"_____no_output_____"
],
[
"person_sp = spark.read.option(\"inferSchema\", True) \\\n .option(\"header\", True) \\\n .csv('data/persons.csv')",
"_____no_output_____"
],
[
"person_sp.show()",
"+---+------+-------+------+\n|age|height| name| sex|\n+---+------+-------+------+\n| 23| 156| Alice|female|\n| 21| 181| Bob| male|\n| 27| 176|Charlie| male|\n| 24| 167| Eve|female|\n| 19| 172|Frances|female|\n| 31| 191| George| male|\n+---+------+-------+------+\n\n"
],
[
"person_sp[['age', 'name']].show()",
"+---+-------+\n|age| name|\n+---+-------+\n| 23| Alice|\n| 21| Bob|\n| 27|Charlie|\n| 24| Eve|\n| 19|Frances|\n| 31| George|\n+---+-------+\n\n"
]
],
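[
    [
      "The introduction noted that Spark is lazy: transformations only extend the execution plan, and nothing runs until an action. A tiny hedged demonstration (added here) using the `person_sp` frame just created:",
      "_____no_output_____"
    ]
  ],
  [
    [
      "plan = person_sp.filter('age > 20').select('name')  # transformations: build the plan, no job runs\nplan.count()  # action: only now does Spark schedule and execute the plan",
      "_____no_output_____"
    ]
  ],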
[
[
"### 简单transformation",
"_____no_output_____"
],
[
"spark的`dataframe.select`实际上接受任何column对象, 一个column对象概念上是dataframe的一列。一列可以是dataframe的一列输入,也可以是一个计算结果或者多个列的transformation结果。 以改变一列为大写为例:",
"_____no_output_____"
]
],
[
[
"ret = pd.DataFrame(person_pd['name'].apply(lambda x: x.upper()))\nret",
"_____no_output_____"
],
[
"result = person_sp.select(\n sf.upper(person_sp.name)\n)\nresult.show()",
"+-----------+\n|upper(name)|\n+-----------+\n| ALICE|\n| BOB|\n| CHARLIE|\n| EVE|\n| FRANCES|\n| GEORGE|\n+-----------+\n\n"
]
],
[
[
"### 给dataframe增加一列",
"_____no_output_____"
],
[
"pandas给dataframe增加一列很方便,直接给df赋值就行了。spark需要使用`withColumn`函数。",
"_____no_output_____"
]
],
[
[
"def create_salutation(row):\n sex = row[0]\n name = row[1]\n if sex == 'male':\n return 'Mr '+name\n else:\n return \"Mrs \"+name\n \nresult = person_pd.copy()\nresult['salutation'] = result[['sex','name']].apply(create_salutation, axis=1, result_type='expand')\nresult",
"_____no_output_____"
],
[
"result = person_sp.withColumn(\n \"salutation\",\n sf.concat(sf.when(person_sp.sex == 'male', \"Mr \").otherwise(\"Mrs \"), person_sp.name)\n)\nresult.show()",
"+---+------+-------+------+-----------+\n|age|height| name| sex| salutation|\n+---+------+-------+------+-----------+\n| 23| 156| Alice|female| Mrs Alice|\n| 21| 181| Bob| male| Mr Bob|\n| 27| 176|Charlie| male| Mr Charlie|\n| 24| 167| Eve|female| Mrs Eve|\n| 19| 172|Frances|female|Mrs Frances|\n| 31| 191| George| male| Mr George|\n+---+------+-------+------+-----------+\n\n"
]
],
[
[
"### 过滤",
"_____no_output_____"
]
],
[
[
"result = person_pd[person_pd['age'] > 20]\nresult",
"_____no_output_____"
]
],
[
[
"spark支持三种过滤写法",
"_____no_output_____"
]
],
[
[
"person_sp.filter(person_sp['age'] > 20).show()",
"+---+------+-------+------+\n|age|height| name| sex|\n+---+------+-------+------+\n| 23| 156| Alice|female|\n| 21| 181| Bob| male|\n| 27| 176|Charlie| male|\n| 24| 167| Eve|female|\n| 31| 191| George| male|\n+---+------+-------+------+\n\n"
],
[
"person_sp[person_sp['age'] > 20].show()",
"+---+------+-------+------+\n|age|height| name| sex|\n+---+------+-------+------+\n| 23| 156| Alice|female|\n| 21| 181| Bob| male|\n| 27| 176|Charlie| male|\n| 24| 167| Eve|female|\n| 31| 191| George| male|\n+---+------+-------+------+\n\n"
],
[
"person_sp.filter('age > 20').show()",
"+---+------+-------+------+\n|age|height| name| sex|\n+---+------+-------+------+\n| 23| 156| Alice|female|\n| 21| 181| Bob| male|\n| 27| 176|Charlie| male|\n| 24| 167| Eve|female|\n| 31| 191| George| male|\n+---+------+-------+------+\n\n"
]
],
[
[
"### 分组和聚合",
"_____no_output_____"
],
[
"类似sql中的`select aggregation Group by grouping`语句功能,pandas和spark都定义了一些聚合函数,如:\n- count\n- sum\n- avg\n- corr\n- first\n- last\n\n可以具体查看[PySpark Function Documentation](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#module-pyspark.sql.functions)",
"_____no_output_____"
]
],
[
[
"result = person_pd.groupby('sex').agg({'age': 'mean', 'height':['min', 'max']})\nresult",
"_____no_output_____"
],
[
"from pyspark.sql.functions import avg, min, max\nresult = person_sp.groupBy(person_sp.sex).agg(min(person_sp.height).alias('min height'), max(person_sp.height).alias('max height'),\n avg(person_sp.age))\n \nresult.show()",
"+------+----------+----------+------------------+\n| sex|min height|max height| avg(age)|\n+------+----------+----------+------------------+\n|female| 156| 172| 22.0|\n| male| 176| 191|26.333333333333332|\n+------+----------+----------+------------------+\n\n"
],
[
"person_sp.show()",
"+---+------+-------+------+\n|age|height| name| sex|\n+---+------+-------+------+\n| 23| 156| Alice|female|\n| 21| 181| Bob| male|\n| 27| 176|Charlie| male|\n| 24| 167| Eve|female|\n| 19| 172|Frances|female|\n| 31| 191| George| male|\n+---+------+-------+------+\n\n"
]
],
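[
    [
      "The remaining aggregation functions listed above (`count`, `first`, ...) follow the same pattern. A brief hedged example (added here), reusing `person_sp`:",
      "_____no_output_____"
    ]
  ],
  [
    [
      "from pyspark.sql.functions import count, first\n# Group sizes plus the first name encountered per group; alias() names the output columns\nperson_sp.groupBy('sex').agg(count('name').alias('n'),\n                             first('name').alias('first name')).show()",
      "_____no_output_____"
    ]
  ],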
[
[
"### join",
"_____no_output_____"
],
[
"spark也支持跨dataframe做join, 让我们加个数据作例子。",
"_____no_output_____"
]
],
[
[
"addresses = spark.read.json('data/addresses.json')\naddresses_pd = addresses.toPandas()\naddresses_pd",
"_____no_output_____"
],
[
"pd_join = person_pd.merge(addresses_pd, left_on=['name'], right_on=['name'])\npd_join",
"_____no_output_____"
],
[
"sp_join = person_sp.join(addresses, person_sp.name==addresses.name)\nsp_join.show()\nsp_join_1 = person_sp.join(addresses, on=['name'])\nsp_join_1.show()",
"+---+------+-----+------+---------+-----+\n|age|height| name| sex| city| name|\n+---+------+-----+------+---------+-----+\n| 23| 156|Alice|female| Hamburg|Alice|\n| 21| 181| Bob| male|Frankfurt| Bob|\n+---+------+-----+------+---------+-----+\n\n+-----+---+------+------+---------+\n| name|age|height| sex| city|\n+-----+---+------+------+---------+\n|Alice| 23| 156|female| Hamburg|\n| Bob| 21| 181| male|Frankfurt|\n+-----+---+------+------+---------+\n\n"
]
],
[
[
"### 重装dataframe",
"_____no_output_____"
],
[
"pandas可以很方便地将现有的一列数据赋给一个新的列, 但是spark做起来不是很方便,需要join操作。",
"_____no_output_____"
]
],
[
[
"df = person_pd[['name', 'age']]\ncol = person_pd['height']\nresult = df.copy()\nresult['h2'] = col\nresult",
"_____no_output_____"
],
[
"df = person_sp[['name', 'age']]\ncol = person_sp[['name', 'height']]\nresult = df.join(col, on=['name'])\nresult.show()",
"+-------+---+------+\n| name|age|height|\n+-------+---+------+\n| Alice| 23| 156|\n| Bob| 21| 181|\n|Charlie| 27| 176|\n| Eve| 24| 167|\n|Frances| 19| 172|\n| George| 31| 191|\n+-------+---+------+\n\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
d0d6241951b727c00cc165f6b4ed0217457922f3 | 2,365 | ipynb | Jupyter Notebook | test/dtype.ipynb | opteroncx/MoePhoto | 8e60803c02cbba0d1445cdb7570df1f836c9dff2 | [
"Apache-2.0"
] | 192 | 2018-09-09T15:57:02.000Z | 2022-03-30T06:40:09.000Z | test/dtype.ipynb | lotress/MoePhoto | 6f47515d2cf236773a46413f57839565fa665796 | [
"Apache-2.0"
] | 10 | 2019-02-14T18:52:53.000Z | 2021-10-15T01:41:56.000Z | test/dtype.ipynb | opteroncx/MoePhoto | 8e60803c02cbba0d1445cdb7570df1f836c9dff2 | [
"Apache-2.0"
] | 28 | 2018-10-04T13:52:16.000Z | 2022-03-18T03:26:55.000Z | 23.415842 | 92 | 0.557294 | [
[
[
"import sys\nsys.path.append('../python')\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nimport cv2\nfrom config import config\nfrom imageProcess import toTorch, readFile, genGetModel, toOutput\nfrom models import Net4x\nimport runSR\n\nshow = lambda im: Image.fromarray(cv2.cvtColor(toOutput(8)(im), cv2.COLOR_BGR2RGB))\n@genGetModel\ndef getModel(opt):\n return Net4x()\n\ndef SRopt():pass\nSRopt.model = '../model/a4/model_new.pth'\nSRopt.scale = 4\nSRopt.ramCoef = 1 / 8000.\nSRopt.cropsize = 0",
"_____no_output_____"
],
[
"config.halfPrecision = True\nconfig.maxGraphicMemoryUsage = 3*2**31\nconfig.getFreeMem()\ndevice = torch.device('cuda')\ndtype = torch.half\nSRopt.modelCached = getModel(SRopt)",
"_____no_output_____"
],
[
"original = toTorch(256, dtype, device)(readFile('t.png'))\nsmall = toTorch(256, dtype, device)(readFile('s.png'))\nprint(dtype, original.shape, small.shape)",
"_____no_output_____"
],
[
"show(small)",
"_____no_output_____"
],
[
"res = runSR.sr(small, SRopt)\nprint(F.l1_loss(res.to(dtype=torch.float), original.to(dtype=torch.float)))\nprint(F.mse_loss(res.to(dtype=torch.float), original.to(dtype=torch.float)))\nshow(res)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
d0d62d041526b11019dc87354154f00c6948cf08 | 45,477 | ipynb | Jupyter Notebook | dl-tutorial/distant-viewing-tutorial-part-2.ipynb | JoshuaGOB/NLTK | 7cc1dc4a8d848df186cae9b86313b455c42c0cb4 | [
"MIT"
] | null | null | null | dl-tutorial/distant-viewing-tutorial-part-2.ipynb | JoshuaGOB/NLTK | 7cc1dc4a8d848df186cae9b86313b455c42c0cb4 | [
"MIT"
] | 1 | 2021-12-13T20:36:40.000Z | 2021-12-13T20:36:40.000Z | dl-tutorial/distant-viewing-tutorial-part-2.ipynb | JoshuaGOB/NLTK | 7cc1dc4a8d848df186cae9b86313b455c42c0cb4 | [
"MIT"
] | 1 | 2018-07-03T04:16:35.000Z | 2018-07-03T04:16:35.000Z | 82.23689 | 2,152 | 0.670757 | [
[
[
"# Distant Viewing with Deep Learning: Part 2\n\nIn Part 2 of this tutorial, we introduce the concepts of deep learning and show it yields\ninteresting similarity metrics and is able to extract feature useful features such as the\npresence and location of faces in the image.",
"_____no_output_____"
],
[
"## Step 9: Python modules for deep learning\n\nWe need to reload all of the Python modules we used in the Part 1.",
"_____no_output_____"
]
],
[
[
"%pylab inline\n# !pip3 install keras # this wasn't working so had to do change from here\n# ------------------\n\nimport getpass\nimport os\n\npassword = getpass.getpass()\n\n\ncommand = \"sudo -S sudo pip3 install https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.8.0-py3-none-any.whl\" #can be any command but don't forget -S as it enables input from stdin\nos.system('echo %s | %s' % (password, command))\n\n!git clone https://github.com/keras-team/keras.git\nos.chdir('keras')\n\ncommand = \"sudo -S python3 setup.py install\" #can be any command but don't forget -S as it enables input from stdin\nos.system('echo %s | %s' % (password, command))\n\ncommand = \"sudo -S sudo pip3 install pandas\" #can be any command but don't forget -S as it enables input from stdin\nos.system('echo %s | %s' % (password, command))\n\n#!sudo pip3 install tensorflow\n# To here------------------\nimport collections\n#import tensorflow\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\n\n\nimport importlib\nfrom os.path import join\nfrom matplotlib.colors import rgb_to_hsv",
"_____no_output_____"
],
[
"\ncommand = \"sudo -S sudo pip3 install keras\" #can be any command but don't forget -S as it enables input from stdin\nos.system('echo %s | %s' % (password, command))\nimport keras\n",
"_____no_output_____"
],
[
"import tensorflow",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nplt.rcParams[\"figure.figsize\"] = (8,8)\nos.chdir(\"/Users/jgo384/Documents/GitHub/NLTK/dl-tutorial/\")\nos.listdir(\"/Users/jgo384/Documents/GitHub/NLTK/dl-tutorial/\")\n",
"_____no_output_____"
]
],
[
[
"We also need to reload the wikiart metadata.",
"_____no_output_____"
]
],
[
[
"wikiart = pd.read_csv(\"meta/wikiart.csv\")",
"_____no_output_____"
]
],
[
[
"To run the code in this notebook from scratch, you will also need the **keras**\nmodule for working with neural networks. This are not included in the default\nAnaconda Python installation and need to be installed seperately. The code\nbelow checks if you have keras installed. If you do, it will be loaded. Otherwise,\na flag will be set so that the code below that requires keras will load the\npre-loaded data.",
"_____no_output_____"
]
],
[
[
"import keras\n\n\nif importlib.util.find_spec(\"keras\") is not None:\n from keras.applications.vgg19 import VGG19\n from keras.preprocessing import image\n from keras.applications.vgg19 import preprocess_input, decode_predictions\n from keras.models import Model\n keras_flag = True\nelse:\n keras_flag = False",
"_____no_output_____"
]
],
[
[
"If you are struggling with installing these, we are happy to assist. You'll be able to follow\nalong without keras, but will not be able to apply the techniques you learned today to new datasets\nwithout it.",
"_____no_output_____"
],
[
"## Step 10: Applying deep learning with neural networks\n\n",
"_____no_output_____"
],
[
"We start by loading a particular neural network model called VGG19. It\ncontains 25 layers and over 143 million parameters. The code below reads\nin the entire model and prints out it structure (unless keras is unavailable,\nin which case a saved version of the model is printed just for reference).",
"_____no_output_____"
]
],
[
[
"if keras_flag:\n vgg19_full = VGG19(weights='imagenet')\n vgg19_full.summary()\nelse:\n with open('data/vgg19.txt','r') as f:\n for line in f:\n print(line, end='')",
"_____no_output_____"
]
],
[
[
"The VGG19 model was trained to identify 1000 classes of objects within an\nimage. It was built as part of the ImageNet challenge, one of the most\ninfluential computer vision competitions that has been running since 2010.\n\nWe will load a test photo of my dog and see what classes the model predicts\nfor the image. We will use a slightly different function to read in the image\nthat scales it to have 224-by-224 pixels as required by the algorithm.",
"_____no_output_____"
]
],
[
[
"img_path = join(\"images\", \"test\", \"dog.jpg\")\nif keras_flag:\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\nelse:\n img = imread(img_path)\n x = img.copy().astype(np.float32)\n \nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\nx.shape",
"_____no_output_____"
]
],
[
[
"Notice that it is now a four dimensional array, a point that we will come back to in\na moment. We can look at the image here using the `imshow` function.",
"_____no_output_____"
]
],
[
[
"plt.imshow(img)",
"_____no_output_____"
]
],
[
[
"Assuming you have keras installed, the code here takes the image `x` and predicts\nvalues from the model. Notice that the output of the model is a sequence of 1000\nnumbers. These indicate the predicted probability that the image contains each of \none of the 1000 pre-selected categories. The function `decode_predictions` converts\nthese to give the names of the five most likely categories.",
"_____no_output_____"
]
],
[
[
"if keras_flag:\n y = vgg19_full.predict(x)\n print(y.shape)\n for pred in decode_predictions(y)[0]:\n print(pred)\nelse:\n print((1, 1000))\n y = np.load(join('data', 'dog_pred.npy'))\n for pred in decode_predictions(y)[0]:\n print(pred)",
"_____no_output_____"
]
],
[
[
"The largest predicted class is a \"Shih-Tzu\", incidently an exact match for his\nbreed! The other dogs are all similarly sized dogs, and obvious choices for \nmaking a mistake.",
"_____no_output_____"
],
[
"Now, let's compute the category predictions for each image in the corpus. This involves\nreading in each image in the wikiart corpus and then running them through the VGG19\nmodel. This can take some time, particularly on an older machine, so we have created a\nflag called `process_new`. Keep it to `False` to load pre-computed categories; you can\nswitch it to `True` if you want to compute them directly",
"_____no_output_____"
]
],
[
[
"process_new = False\n\nif process_new:\n wikiart_img = np.zeros((wikiart.shape[0], 224, 224, 3))\n\n for index, row in wikiart.iterrows():\n img_path = join('images', 'wikiart', row['filename'])\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n wikiart_img[index, :, :, :] = x\n if (index % 50) == 0:\n print(\"Done with {0:03d}\".format(index))\n \n wikiart_img = preprocess_input(wikiart_img)\n wikiart_raw = vgg19_full.predict(wikiart_img, verbose=True)\n wikiart_vgg19 = decode_predictions(wikiart_raw, top=20)\n \nelse:\n wikiart_vgg19 = np.load(\"data/wikiart_vgg19_categories.npy\")\n\nprint(wikiart_vgg19.shape)",
"_____no_output_____"
]
],
[
[
"What's the most common top category type for this collection? When can use the\nPython module `collections` to look at the top-10 most common:",
"_____no_output_____"
]
],
[
[
"collections.Counter(wikiart_vgg19[:, 1, 1]).most_common(10)",
"_____no_output_____"
]
],
[
[
"Cliffs and fountains both seem reasonable, but I doubt there are many jigsaw puzzels \nin the wikiart corpus. **Any idea by this might be so common?**",
"_____no_output_____"
],
[
"## Step 11: Neural network embedding\n\nThe VGG19 model was constructed in order to predict the objects present in an image,\nbut there is a lot more that we can do with the model. The amazing property of deep\nlearning is that the intermediate results in the neural network operate by detecting\nlower-level features of the image. For example, the first few detect edges and textures,\nthe next few by understanding shapes, and the latter ones put these together to detect\nobjects. This is incredibly useful because it means that looking at the intermediate\noutputs can tell us something interesting about the images beyond just the 1000\npredicted categories.\n\nAssuming the keras module is installed, we will create a new model that outputs the\nsecond-to-last output of the model. The prediction of this contains 4096 dimensions.\nThese do not correspond directly to categories, but (in theory) images containing\nsimilar objects should have similar 4096-dimensional values.",
"_____no_output_____"
]
],
[
[
"if keras_flag:\n vgg_fc2 = Model(inputs=vgg19_full.input, outputs=vgg19_full.get_layer('fc2').output)\n y = vgg_fc2.predict(x)\n print(y.shape)\nelse:\n print((1, 4096))",
"_____no_output_____"
]
],
[
[
"We can use this new model to predict values on the set of images `wikiart_img`. As above,\nthis can take a few minutes, so you may want to load the pre-saved data again by keeping\n`process_new` equal to `False`.",
"_____no_output_____"
]
],
[
[
"process_new = False\n\nif process_new:\n wikiart_fc2 = vgg_fc2.predict(wikiart_img, verbose=True)\n wikiart_fc2.shape\nelse:\n wikiart_fc2 = np.load(\"data/wikiart_vgg19_fc2.npy\")\n\nprint(wikiart_fc2.shape)",
"_____no_output_____"
]
],
[
[
"Now, we can use these values to figure out which images are similar to another image.\nThis is similar to the closest saturation values, but using a more complex numeric\nmetric for comparison. Compare the results here with those from saturation alone:",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(14, 14))\n\ndists = np.sum(np.abs(wikiart_fc2 - wikiart_fc2[1, :]), 1)\nidx = np.argsort(dists.flatten())[:12]\n\nfor ind, i in enumerate(idx):\n try:\n plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n plt.subplot(3, 4, ind + 1)\n\n img_path = join('images', 'wikiart', wikiart.iloc[i]['filename'])\n img = imread(img_path)\n plt.imshow(img)\n plt.axis(\"off\")\n except:\n pass",
"_____no_output_____"
]
],
[
[
"The images are all impressionist paintings of trees, showing how the model matches both the\ncontent and style of the original. **In the code below, look at the recommendations for the\nimage you used back in part 7.**",
"_____no_output_____"
]
],
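[
[
"Below is one possible sketch for that exercise (an addition to the original notebook).\nIt reuses the L1 distance over the fc2 features; `my_index` is a placeholder you would\nreplace with the index of the image you used in part 7:",
"_____no_output_____"
]
],
[
[
"# Sketch: recommendations for an image of your choosing\nmy_index = 1  # placeholder: replace with the index of the image from part 7\n\nplt.figure(figsize=(14, 14))\ndists = np.sum(np.abs(wikiart_fc2 - wikiart_fc2[my_index, :]), 1)  # L1 distances\nidx = np.argsort(dists.flatten())[:12]                             # twelve closest images\n\nfor ind, i in enumerate(idx):\n    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n    plt.subplot(3, 4, ind + 1)\n    img = imread(join('images', 'wikiart', wikiart.iloc[i]['filename']))\n    plt.imshow(img)\n    plt.axis(\"off\")",
"_____no_output_____"
]
]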
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
d0d63266c76a487f17c151b1d02bcd567b386aa4 | 3,030 | ipynb | Jupyter Notebook | jupyter_notebooks/0016_new_dale-chall_list_improvement.ipynb | korniichuk/phd | 5eb30a4b8bcc716308a849081034bb54c3075999 | [
"Unlicense"
] | null | null | null | jupyter_notebooks/0016_new_dale-chall_list_improvement.ipynb | korniichuk/phd | 5eb30a4b8bcc716308a849081034bb54c3075999 | [
"Unlicense"
] | null | null | null | jupyter_notebooks/0016_new_dale-chall_list_improvement.ipynb | korniichuk/phd | 5eb30a4b8bcc716308a849081034bb54c3075999 | [
"Unlicense"
] | null | null | null | 20.753425 | 95 | 0.488779 | [
[
[
"# New Dale-Chall list improvement\n## Step 1: Lower case, no duplicates",
"_____no_output_____"
]
],
[
[
"from nltk.stem import PorterStemmer",
"_____no_output_____"
],
[
"with open('promovolt/resources/new_dale-chall_simple_words_en-1.0.txt', 'r') as f:\n words = f.read().splitlines()\n \nfor i, word in enumerate(words):\n words[i] = word.lower().strip()\n \nwords = list(set(words))\n\nwords.sort()",
"_____no_output_____"
],
[
"len(words)",
"_____no_output_____"
]
],
[
[
"### Save to file",
"_____no_output_____"
]
],
[
[
"with open('promovolt/resources/new_dale-chall_simple_words_en-1.1.txt', 'w') as f:\n for i, word in enumerate(words):\n if i < (len(words) - 1):\n f.write(word + '\\n')\n else:\n f.write(word)",
"_____no_output_____"
]
],
[
[
"## Step 2: Stemming",
"_____no_output_____"
]
],
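[
[
"To see what stemming does, here is a small illustrative example (an addition to the\noriginal notebook). Note that stems need not be dictionary words, and irregular forms\nsuch as 'ran' are left unchanged:",
"_____no_output_____"
]
],
[
[
"# Sketch: the Porter stemmer collapses inflected forms to a common stem\nstemmer = PorterStemmer()\nfor w in ['running', 'runs', 'ran', 'easily']:\n    print(w, '->', stemmer.stem(w))",
"_____no_output_____"
]
],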
[
[
"with open('promovolt/resources/new_dale-chall_simple_words_en-1.1.txt', 'r') as f:\n words = f.read().splitlines()\n\nstemmer = PorterStemmer()\n\nfor i, word in enumerate(words):\n words[i] = stemmer.stem(word)\n \nwords = list(set(words))\n\nwords.sort()",
"_____no_output_____"
],
[
"len(words)",
"_____no_output_____"
]
],
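[
[
"Before saving, here is a quick sanity check (an addition to the original notebook):\nstem the tokens of a sample sentence and see which ones the stemmed simple-word list\ncovers. A word whose stem is missing would count as 'difficult' in a Dale-Chall style\nreadability score.",
"_____no_output_____"
]
],
[
[
"# Sketch: flag sample words whose stems are not in the simple-word list\nsimple = set(words)\nsample = 'the committee deliberated about the proposal'.split()\nfor token in sample:\n    print(token, stemmer.stem(token) in simple)",
"_____no_output_____"
]
],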
[
[
"### Save to file",
"_____no_output_____"
]
],
[
[
"with open('promovolt/resources/new_dale-chall_simple_words_en_stemmed.txt', 'w') as f:\n for i, word in enumerate(words):\n if i < (len(words) - 1):\n f.write(word + '\\n')\n else:\n f.write(word)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
d0d634dfef4733ced75d11984953b95c2937a47c | 21,496 | ipynb | Jupyter Notebook | Natural Language Processing with Attention Models/Week 4 - Chatbot/C4_W4_Ungraded_Lab_Revnet.ipynb | y33-j3T/Coursera | fbd5ec28ff95db8eef2de13ed96b839db08f2069 | [
"MIT"
] | 125 | 2021-01-02T03:37:27.000Z | 2022-03-23T21:58:13.000Z | Natural Language Processing with Attention Models/Week 4 - Chatbot/C4_W4_Ungraded_Lab_Revnet.ipynb | y33-j3T/Coursera | fbd5ec28ff95db8eef2de13ed96b839db08f2069 | [
"MIT"
] | 2 | 2021-02-08T04:26:14.000Z | 2021-12-31T08:41:38.000Z | Natural Language Processing with Attention Models/Week 4 - Chatbot/C4_W4_Ungraded_Lab_Revnet.ipynb | y33-j3T/Coursera | fbd5ec28ff95db8eef2de13ed96b839db08f2069 | [
"MIT"
] | 150 | 2021-01-02T00:27:46.000Z | 2022-03-30T03:42:27.000Z | 35.413509 | 1,001 | 0.595227 | [
[
[
"# Putting the \"Re\" in Reformer: Ungraded Lab\nThis ungraded lab will explore Reversible Residual Networks. You will use these networks in this week's assignment that utilizes the Reformer model. It is based on on the Transformer model you already know, but with two unique features.\n* Locality Sensitive Hashing (LSH) Attention to reduce the compute cost of the dot product attention and\n* Reversible Residual Networks (RevNets) organization to reduce the storage requirements when doing backpropagation in training.\n\nIn this ungraded lab we'll start with a quick review of Residual Networks and their implementation in Trax. Then we will discuss the Revnet architecture and its use in Reformer.\n## Outline\n- [Part 1: Residual Networks](#1)\n - [1.1 Branch](#1.1)\n - [1.2 Residual Model](#1.2)\n- [Part 2: Reversible Residual Networks](#2)\n - [2.1 Trax Reversible Layers](#2.1)\n - [2.2 Residual Model](#2.2)\n\n\n",
"_____no_output_____"
]
],
[
[
"import trax\nfrom trax import layers as tl # core building block\nimport numpy as np # regular ol' numpy\nfrom trax.models.reformer.reformer import (\n ReversibleHalfResidualV2 as ReversibleHalfResidual,\n) # unique spot\nfrom trax import fastmath # uses jax, offers numpy on steroids\nfrom trax import shapes # data signatures: dimensionality and type\nfrom trax.fastmath import numpy as jnp # For use in defining new layer types.\nfrom trax.shapes import ShapeDtype\nfrom trax.shapes import signature",
"_____no_output_____"
]
],
[
[
"## Part 1.0 Residual Networks\n[Deep Residual Networks ](https://arxiv.org/abs/1512.03385) (Resnets) were introduced to improve convergence in deep networks. Residual Networks introduce a shortcut connection around one or more layers in a deep network as shown in the diagram below from the original paper.\n\n<center><img src = \"Revnet7.PNG\" height=\"250\" width=\"250\"></center>\n<center><b>Figure 1: Residual Network diagram from original paper</b></center>\n\nThe [Trax documentation](https://trax-ml.readthedocs.io/en/latest/notebooks/layers_intro.html#2.-Inputs-and-Outputs) describes an implementation of Resnets using `branch`. We'll explore that here by implementing a simple resnet built from simple function based layers. Specifically, we'll build a 4 layer network based on two functions, 'F' and 'G'.\n\n<img src = \"Revnet8.PNG\" height=\"200\" width=\"1400\">\n<center><b>Figure 2: 4 stage Residual network</b></center>\nDon't worry about the lengthy equations. Those are simply there to be referenced later in the notebook.",
"_____no_output_____"
],
[
"<a name=\"1.1\"></a>\n### Part 1.1 Branch\nTrax `branch` figures prominently in the residual network layer so we will first examine it. You can see from the figure above that we will need a function that will copy an input and send it down multiple paths. This is accomplished with a [branch layer](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#module-trax.layers.combinators), one of the Trax 'combinators'. Branch is a combinator that applies a list of layers in parallel to copies of inputs. Lets try it out! First we will need some layers to play with. Let's build some from functions.",
"_____no_output_____"
]
],
[
[
"# simple function taking one input and one output\nbl_add1 = tl.Fn(\"add1\", lambda x0: (x0 + 1), n_out=1)\nbl_add2 = tl.Fn(\"add2\", lambda x0: (x0 + 2), n_out=1)\nbl_add3 = tl.Fn(\"add3\", lambda x0: (x0 + 3), n_out=1)\n# try them out\nx = np.array([1])\nprint(bl_add1(x), bl_add2(x), bl_add3(x))\n# some information about our new layers\nprint(\n \"name:\",\n bl_add1.name,\n \"number of inputs:\",\n bl_add1.n_in,\n \"number of outputs:\",\n bl_add1.n_out,\n)",
"_____no_output_____"
],
[
"bl_3add1s = tl.Branch(bl_add1, bl_add2, bl_add3)\nbl_3add1s",
"_____no_output_____"
]
],
[
[
"Trax uses the concept of a 'stack' to transfer data between layers.\nFor Branch, for each of its layer arguments, it copies the `n_in` inputs from the stack and provides them to the layer, tracking the max_n_in, or the largest n_in required. It then pops the max_n_in elements from the stack.\n<img src = \"branch1.PNG\" height=\"260\" width=\"600\">\n<center><b>Figure 3: One in, one out Branch</b></center>\nOn output, each layer, in succession pushes its results onto the stack. Note that the push/pull operations impact the top of the stack. Elements that are not part of the operation (n, and m in the diagram) remain intact.",
"_____no_output_____"
]
],
[
[
"# n_in = 1, Each bl_addx pushes n_out = 1 elements onto the stack\nbl_3add1s(x)",
"_____no_output_____"
],
[
"# n = np.array([10]); m = np.array([20]) # n, m will remain on the stack\nn = \"n\"\nm = \"m\" # n, m will remain on the stack\nbl_3add1s([x, n, m]) ",
"_____no_output_____"
]
],
[
[
"Each layer in the input list copies as many inputs from the stack as it needs, and their outputs are successively combined on stack. Put another way, each element of the branch can have differing numbers of inputs and outputs. Let's try a more complex example.",
"_____no_output_____"
]
],
[
[
"bl_addab = tl.Fn(\n \"addab\", lambda x0, x1: (x0 + x1), n_out=1\n) # Trax figures out how many inputs there are\nbl_rep3x = tl.Fn(\n \"add2x\", lambda x0: (x0, x0, x0), n_out=3\n) # but you have to tell it how many outputs there are\nbl_3ops = tl.Branch(bl_add1, bl_addab, bl_rep3x)",
"_____no_output_____"
]
],
[
[
"In this case, the number if inputs being copied from the stack varies with the layer\n<img src = \"branch2.PNG\" height=\"260\" width=\"600\">\n<center><b>Figure 4: variable in, variable out Branch</b></center>\nThe stack when the operation is finished is 5 entries reflecting the total from each layer.",
"_____no_output_____"
]
],
[
[
"# Before Running this cell, what is the output you are expecting?\ny = np.array([3])\nbl_3ops([x, y, n, m])",
"_____no_output_____"
]
],
[
[
"Branch has a special feature to support Residual Network. If an argument is 'None', it will pull the top of stack and push it (at its location in the sequence) onto the output stack\n<img src = \"branch3.PNG\" height=\"260\" width=\"600\">\n<center><b>Figure 5: Branch for Residual</b></center>",
"_____no_output_____"
]
],
[
[
"bl_2ops = tl.Branch(bl_add1, None)\nbl_2ops([x, n, m])",
"_____no_output_____"
]
],
[
[
"<a name=\"1.2\"></a>\n### Part 1.2 Residual Model\nOK, your turn. Write a function 'MyResidual', that uses `tl.Branch` and `tl.Add` to build a residual layer. If you are curious about the Trax implementation, you can see the code [here](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py).",
"_____no_output_____"
]
],
[
[
"def MyResidual(layer):\n return tl.Serial(\n ### START CODE HERE ###\n # tl.----,\n # tl.----,\n ### END CODE HERE ###\n )",
"_____no_output_____"
],
[
"# Lets Try it\nmr = MyResidual(bl_add1)\nx = np.array([1])\nmr([x, n, m])",
"_____no_output_____"
]
],
[
[
"**Expected Result**\n(array([3]), 'n', 'm')",
"_____no_output_____"
],
[
"Great! Now, let's build the 4 layer residual Network in Figure 2. You can use `MyResidual`, or if you prefer, the tl.Residual in Trax, or a combination!",
"_____no_output_____"
]
],
[
[
"Fl = tl.Fn(\"F\", lambda x0: (2 * x0), n_out=1)\nGl = tl.Fn(\"G\", lambda x0: (10 * x0), n_out=1)\nx1 = np.array([1])",
"_____no_output_____"
],
[
"resfg = tl.Serial(\n ### START CODE HERE ###\n # None, #Fl # x + F(x)\n # None, #Gl # x + F(x) + G(x + F(x)) etc\n # None, #Fl\n # None, #Gl\n ### END CODE HERE ###\n)",
"_____no_output_____"
],
[
"# Lets try it\nresfg([x1, n, m])",
"_____no_output_____"
]
],
[
[
"**Expected Results**\n(array([1089]), 'n', 'm')",
"_____no_output_____"
],
[
"<a name=\"2\"></a>\n## Part 2.0 Reversible Residual Networks\nThe Reformer utilized RevNets to reduce the storage requirements for performing backpropagation.\n<img src = \"Reversible2.PNG\" height=\"260\" width=\"600\">\n<center><b>Figure 6: Reversible Residual Networks </b></center>\nThe standard approach on the left above requires one to store the outputs of each stage for use during backprop. By using the organization to the right, one need only store the outputs of the last stage, y1, y2 in the diagram. Using those values and running the algorithm in reverse, one can reproduce the values required for backprop. This trades additional computation for memory space which is at a premium with the current generation of GPU's/TPU's.\n\nOne thing to note is that the forward functions produced by two networks are similar, but they are not equivalent. Note for example the asymmetry in the output equations after two stages of operation.\n<img src = \"Revnet1.PNG\" height=\"340\" width=\"1100\">\n<center><b>Figure 7: 'Normal' Residual network (Top) vs REversible Residual Network </b></center>\n\n### Part 2.1 Trax Reversible Layers\n\nLet's take a look at how this is used in the Reformer.",
"_____no_output_____"
]
],
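[
[
"Before looking at the Trax implementation, here is a small numerical sketch (an\naddition to the original lab) of why the reversible formulation works: given only the\noutputs y1, y2, the inputs x1, x2 can be reconstructed exactly, so no intermediate\nactivations need to be stored.",
"_____no_output_____"
]
],
[
[
"# Sketch of the reversible-residual algebra with plain numpy\nF = lambda z: 2 * z\nG = lambda z: 10 * z\n\nx1, x2 = np.array([1.0]), np.array([5.0])\n\n# forward pass\ny1 = x1 + F(x2)\ny2 = x2 + G(y1)\n\n# reverse pass: reconstruct the inputs from the outputs alone\nx2_rec = y2 - G(y1)\nx1_rec = y1 - F(x2_rec)\nprint(x1_rec, x2_rec)  # recovers the original x1, x2",
"_____no_output_____"
]
],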
[
[
"refm = trax.models.reformer.ReformerLM(\n vocab_size=33000, n_layers=2, mode=\"train\" # Add more options.\n)\nrefm",
"_____no_output_____"
]
],
[
[
"Eliminating some of the detail, we can see the structure of the network.\n<img src = \"Revnet2.PNG\" height=\"300\" width=\"350\">\n<center><b>Figure 8: Key Structure of Reformer Reversible Network Layers in Trax </b></center>\n\nWe'll review the Trax layers used to implement the Reversible section of the Reformer. First we can note that not all of the reformer is reversible. Only the section in the ReversibleSerial layer is reversible. In a large Reformer model, that section is repeated many times making up the majority of the model.\n<img src = \"Revnet3.PNG\" height=\"650\" width=\"1600\">\n<center><b>Figure 9: Functional Diagram of Trax elements in Reformer </b></center>",
"_____no_output_____"
],
[
"The implementation starts by duplicating the input to allow the two paths that are part of the reversible residual organization with [Dup](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py#L666). Note that this is accomplished by copying the top of stack and pushing two copies of it onto the stack. This then feeds into the ReversibleHalfResidual layer which we'll review in more detail below. This is followed by [ReversibleSwap](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L83). As the name implies, this performs a swap, in this case, the two topmost entries in the stack. This pattern is repeated until we reach the end of the ReversibleSerial section. At that point, the topmost 2 entries of the stack represent the two paths through the network. These are concatenated and pushed onto the stack. The result is an entry that is twice the size of the non-reversible version.\n\nLet's look more closely at the [ReversibleHalfResidual](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L154). This layer is responsible for executing the layer or layers provided as arguments and adding the output of those layers, the 'residual', to the top of the stack. Below is the 'forward' routine which implements this.\n<img src = \"Revnet4.PNG\" height=\"650\" width=\"1600\">\n<center><b>Figure 10: ReversibleHalfResidual code and diagram </b></center>\n\nUnlike the previous residual function, the value that is added is from the second path rather than the input to the set of sublayers in this layer. Note that the Layers called by the ReversibleHalfResidual forward function are not modified to support reverse functionality. This layer provides them a 'normal' view of the stack and takes care of reverse operation.\n\nLet's try out some of these layers! We'll start with the ones that just operate on the stack, Dup() and Swap().",
"_____no_output_____"
]
],
[
[
"x1 = np.array([1])\nx2 = np.array([5])\n# Dup() duplicates the Top of Stack and returns the stack\ndl = tl.Dup()\ndl(x1)",
"_____no_output_____"
],
[
"# ReversibleSwap() duplicates the Top of Stack and returns the stack\nsl = tl.ReversibleSwap()\nsl([x1, x2])",
"_____no_output_____"
]
],
[
[
"You are no doubt wondering \"How is ReversibleSwap different from Swap?\". Good question! Lets look:\n<img src = \"Revnet5.PNG\" height=\"389\" width=\"1000\">\n<center><b>Figure 11: Two versions of Swap() </b></center>\nThe ReverseXYZ functions include a \"reverse\" compliment to their \"forward\" function that provides the functionality to run in reverse when doing backpropagation. It can also be run in reverse by simply calling 'reverse'.",
"_____no_output_____"
]
],
[
[
"# Demonstrate reverse swap\nprint(x1, x2, sl.reverse([x1, x2]))",
"_____no_output_____"
]
],
[
[
"Let's try ReversibleHalfResidual, First we'll need some layers..",
"_____no_output_____"
]
],
[
[
"Fl = tl.Fn(\"F\", lambda x0: (2 * x0), n_out=1)\nGl = tl.Fn(\"G\", lambda x0: (10 * x0), n_out=1)",
"_____no_output_____"
]
],
[
[
"Just a note about ReversibleHalfResidual. As this is written, it resides in the Reformer model and is a layer. It is invoked a bit differently that other layers. Rather than tl.XYZ, it is just ReversibleHalfResidual(layers..) as shown below. This may change in the future.",
"_____no_output_____"
]
],
[
[
"half_res_F = ReversibleHalfResidual(Fl)\nprint(type(half_res_F), \"\\n\", half_res_F)",
"_____no_output_____"
],
[
"half_res_F([x1, x1]) # this is going to produce an error - why?",
"_____no_output_____"
],
[
"# we have to initialize the ReversibleHalfResidual layer to let it know what the input is going to look like\nhalf_res_F.init(shapes.signature([x1, x1]))\nhalf_res_F([x1, x1])",
"_____no_output_____"
]
],
[
[
"Notice the output: (DeviceArray([3], dtype=int32), array([1])). The first value, (DeviceArray([3], dtype=int32) is the output of the \"Fl\" layer and has been converted to a 'Jax' DeviceArray. The second array([1]) is just passed through (recall the diagram of ReversibleHalfResidual above).",
"_____no_output_____"
],
[
"The final layer we need is the ReversibleSerial Layer. This is the reversible equivalent of the Serial layer and is used in the same manner to build a sequence of layers.",
"_____no_output_____"
],
[
"<a name=\"2.2\"></a>\n### Part 2.2 Build a reversible model\nWe now have all the layers we need to build the model shown below. Let's build it in two parts. First we'll build 'blk' and then a list of blk's. And then 'mod'.\n<center><img src = \"Revnet6.PNG\" height=\"800\" width=\"1600\"> </center>\n<center><b>Figure 12: Reversible Model we will build using Trax components </b></center>",
"_____no_output_____"
]
],
[
[
"blk = [ # a list of the 4 layers shown above\n ### START CODE HERE ###\n None,\n None,\n None,\n None,\n]\nblks = [None, None]\n### END CODE HERE ###",
"_____no_output_____"
],
[
"mod = tl.Serial(\n ### START CODE HERE ###\n None,\n None,\n None,\n ### END CODE HERE ###\n)\nmod",
"_____no_output_____"
]
],
[
[
"**Expected Output**\n```\nSerial[\n Dup_out2\n ReversibleSerial_in2_out2[\n ReversibleHalfResidualV2_in2_out2[\n Serial[\n F\n ]\n ]\n ReversibleSwap_in2_out2\n ReversibleHalfResidualV2_in2_out2[\n Serial[\n G\n ]\n ]\n ReversibleSwap_in2_out2\n ReversibleHalfResidualV2_in2_out2[\n Serial[\n F\n ]\n ]\n ReversibleSwap_in2_out2\n ReversibleHalfResidualV2_in2_out2[\n Serial[\n G\n ]\n ]\n ReversibleSwap_in2_out2\n ]\n Concatenate_in2\n]\n```",
"_____no_output_____"
]
],
[
[
"mod.init(shapes.signature(x1))\nout = mod(x1)\nout",
"_____no_output_____"
]
],
[
[
"**Expected Result**\nDeviceArray([ 65, 681], dtype=int32)",
"_____no_output_____"
],
[
"OK, now you have had a chance to try all the 'Reversible' functions in Trax. On to the Assignment!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
d0d65bcc6181481482bfc5e45d756bbc08dcfabf | 450,459 | ipynb | Jupyter Notebook | MSFT Time Series Analysis.ipynb | maxrzhang/Microsoft-Stock-Time-Series-Analysis | ca5cb3318ffef48862acc17b820884b628afe580 | [
"MIT"
] | null | null | null | MSFT Time Series Analysis.ipynb | maxrzhang/Microsoft-Stock-Time-Series-Analysis | ca5cb3318ffef48862acc17b820884b628afe580 | [
"MIT"
] | null | null | null | MSFT Time Series Analysis.ipynb | maxrzhang/Microsoft-Stock-Time-Series-Analysis | ca5cb3318ffef48862acc17b820884b628afe580 | [
"MIT"
] | null | null | null | 158.94813 | 73,976 | 0.842507 | [
[
[
"# MICROSOFT STOCK PRICE TIME SERIES ANALYSIS",
"_____no_output_____"
],
[
"Within this Jupyter Notebook, we will attempt to forecast and model the future price of Microsoft stock through Time Series Analysis and SARIMAX models, while also modelling out potential trades and profits that could be made based on our forecast through training and test sets",
"_____no_output_____"
],
[
"## Importing Necessary Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta\n\nimport statsmodels.api as sm\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.tsa.stattools import acf\nfrom statsmodels.tsa.stattools import pacf\nfrom statsmodels.tsa.arima_model import ARMA\nfrom pmdarima import auto_arima\n\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, roc_auc_score",
"_____no_output_____"
]
],
[
[
"## Reading in the Stock Price Data and Preprocessing the Data",
"_____no_output_____"
]
],
[
[
"MSFT = pd.read_csv('MSFT.csv')\nMSFT.head()",
"_____no_output_____"
],
[
"MSFT.Date = pd.to_datetime(MSFT.Date)\nMSFT = MSFT.sort_values('Date')\nMSFT.Date = MSFT.Date + timedelta(hours = 16)",
"_____no_output_____"
],
[
"stock = MSFT[['Date', 'Close']].copy()\nstock.columns = ['Date', 'Price']\nMSFT.Date = MSFT.Date - timedelta(hours = 6.5)\nMSFT.head()",
"_____no_output_____"
],
[
"stock.head()",
"_____no_output_____"
],
[
"MSFT.columns = ['Date', 'Price', 'High', 'Low', 'Close', 'Adj Close', 'Volume']\nstock = pd.concat([stock, MSFT[['Date', 'Price']]], axis = 0)\nstock = stock.sort_values('Date')\nstock.set_index(stock.Date, inplace = True)\nstock = stock.drop('Date', 1)\nstock.head()",
"_____no_output_____"
]
],
[
[
"## Creating a Function to Test for Stationarity",
"_____no_output_____"
]
],
[
[
"def test_stationarity(timeseries):\n rolmean = pd.Series(timeseries).rolling(12).mean()\n rolstd = pd.Series(timeseries).rolling(12).mean()\n fig = plt.figure(figsize = (12, 8))\n orig = plt.plot(timeseries, color = 'blue', label = 'Original')\n mean = plt.plot(rolmean, color = 'red', label = 'Rolling Average')\n std = plt.plot(rolstd, color = 'black', label = 'Rolling Standard Deviation')\n plt.legend(loc = 'best')\n plt.title('Comparison of Stock Price, Rolling Average, and Rolling Standard Deviation')\n plt.show()\n print('Results of Augmented Dickey-Fuller Test: ')\n dftest = adfuller(timeseries, autolag = 'AIC')\n dfoutput = pd.Series(dftest[0: 4], index = ['Test Statistic', 'P-Value', 'No. Lags Used', 'No. Observations Used'])\n for key, value in list(dftest[4].items()):\n dfoutput['Critical Value (%s)' %key] = value\n print(dfoutput)",
"_____no_output_____"
]
],
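[
[
"As a quick illustration (an addition to the original notebook), we can run the function\non the raw price series. A trending stock price is typically non-stationary, so we would\nexpect a large p-value from the ADF test:",
"_____no_output_____"
]
],
[
[
"test_stationarity(stock.Price)",
"_____no_output_____"
]
],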
[
[
"Now let us examine the stock price's Autocorrelation and Partial Autocorrelation plots",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize = (12, 8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(stock.Price, lags = 200, ax = ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(stock.Price, lags = 200, ax = ax2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see that the Autocorrelation of the Microsoft stock prices is weak, perhaps suggesting that the autocorrelation is not statistically significant within the 95% confidence interval. Observing the Partial Autocorrelation graph, however, while we also do not see clear significant indication of partial autocorrelation within the 95% confidence interval, the results seem more promising in comparison to the Autocorrelation graph and the existence of significant partial autocorrelation may exist. We can also observe the graphs for different lag values.",
"_____no_output_____"
],
[
"## Building the SARIMAX Model",
"_____no_output_____"
]
],
[
[
"optimal = auto_arima(stock.Price, start_p = 0, start_q = 0, test = 'adf', seasonal = True)\noptimal.summary()",
"_____no_output_____"
],
[
"model = sm.tsa.statespace.SARIMAX(stock.Price, trend = 'n', order = (3, 1, 0))\nresults = model.fit()\nresults.summary()",
"/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n"
],
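[
"# Sketch (an addition to the original notebook): quick residual diagnostics.\n# results.resid holds the one-step-ahead residuals of the fitted SARIMAX model;\n# ideally they should look like noise centered on zero.\nresults.resid.plot(figsize = (12, 4), title = 'SARIMAX Residuals')\nplt.show()",
"_____no_output_____"
],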
[
"stock['Forecast'] = results.predict(dynamic = False)\nstock[['Price', 'Forecast']].plot(figsize = (12, 8))\nplt.show()",
"_____no_output_____"
],
[
"stock[['Price', 'Forecast']].iloc[300:].plot(figsize = (12, 8))\nplt.show()",
"_____no_output_____"
],
[
"stock['Forecast'] = [np.NaN for i in range(300)] + list(results.predict(start = 300, end = 507, dynamic = False))\nstock[['Price', 'Forecast']].iloc[400:].plot(figsize = (12, 8))\nplt.show()",
"_____no_output_____"
],
[
"def calculate_profit(timeseries, predictions, stop):\n capital = 0\n transactions = 0\n own = False\n last_buy = 0\n current_price = 0\n for num, i in enumerate(timeseries.iloc[:stop].iterrows()):\n if i[1][predictions] == 1 and own == False:\n capital -= i[1]['Price']\n own = True\n transactions += 1\n last_buy = i[1]['Price']\n elif i[1][predictions] == 0 and own == True:\n capital += i[1]['Price']\n own = False\n transactions += 1\n else:\n pass\n current_price = i[1]['Price']\n print('Currently Owning?: ', own)\n print('Last Buying Price: $', last_buy)\n print('Current Price: $', current_price)\n print('Current Cash: $', capital)\n if own == True:\n print('Profit: $', current_price + capital)\n else:\n print('Profit: $', capital)\n print('Number of Transactins:', transactions)\n print('Cost of transactions: $', transactions * 5)",
"_____no_output_____"
]
],
[
[
"The above function simulates trading using our SARIMAX predictions, and we assume a $5 transaction fee.",
"_____no_output_____"
]
],
[
[
"stock['Target'] = stock.Price.shift(-1)\nstock.tail()",
"_____no_output_____"
],
[
"stock['Forecast'] = results.predict(dynamic = False)\nstock['Predicted Growth'] = stock[['Forecast', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 0 else 0, axis = 1)\nstock['Actual Growth'] = stock[['Target', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 0 else 0, axis = 1)",
"_____no_output_____"
],
[
"calculate_profit(stock.iloc[300:], 'Predicted Growth', -1)",
"Currently Owning?: False\nLast Buying Price: $ 183.600006\nCurrent Price: $ 184.979996\nCurrent Cash: $ 11.70993299999995\nProfit: $ 11.70993299999995\nNumber of Transactins: 100\nCost of transactions: $ 500\n"
],
[
"calculate_profit(stock.iloc[300:], 'Actual Growth', -1)",
"Currently Owning?: False\nLast Buying Price: $ 183.600006\nCurrent Price: $ 184.979996\nCurrent Cash: $ 270.4299149999998\nProfit: $ 270.4299149999998\nNumber of Transactins: 98\nCost of transactions: $ 490\n"
],
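[
"# Sketch (an addition to the original notebook): net profit after the assumed\n# $5-per-transaction fee, which calculate_profit reports but does not deduct.\n# The figures below are copied from the model's printed results above.\ngross_profit = 11.71\ntransactions = 100\nprint('Net profit after fees: $', gross_profit - transactions * 5)",
"_____no_output_____"
],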
[
"stock.head()",
"_____no_output_____"
]
],
[
[
"## Building the ARMA Model",
"_____no_output_____"
]
],
[
[
"model = ARMA(stock.Price, (3, 1)).fit()\nmodel.summary()",
"/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n"
],
[
"stock['Forecast'] = model.predict()\nstock['Target'] = stock.Price.shift(-1)\nstock.head()",
"_____no_output_____"
],
[
"plt.scatter(stock.iloc[1: -1].Target, stock.iloc[1: -1].Forecast)\nplt.xlabel('True Values')\nplt.ylabel('Predictions')\nprint('Score: ', metrics.r2_score(stock.iloc[1: -1].Target, stock.iloc[1: -1].Forecast))\nprint('MSE: ', metrics.mean_squared_error(stock.iloc[1: -1].Target, stock.iloc[1: -1].Forecast))\nplt.show()",
"Score: 0.9566082335540104\nMSE: 11.747995736071015\n"
],
[
"stock['Predicted Growth'] = stock[['Forecast', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 5 else 0 if x[0] - x[1] <= -5 else 2, axis = 1)\nstock['Actual Growth'] = stock[['Target', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 5 else 0 if x[0] - x[1] <= -5 else 2, axis = 1)",
"_____no_output_____"
],
[
"print('Model Results: ')\ncalculate_profit(stock.iloc[300:], 'Predicted Growth', -1)\nprint('\\nBest Case Results: ')\ncalculate_profit(stock.iloc[300:], 'Actual Growth', -1)",
"Model Results: \nCurrently Owning?: True\nLast Buying Price: $ 167.820007\nCurrent Price: $ 184.979996\nCurrent Cash: $ -197.660003\nProfit: $ -12.68000699999999\nNumber of Transactins: 7\nCost of transactions: $ 35\n\nBest Case Results: \nCurrently Owning?: False\nLast Buying Price: $ 135.979996\nCurrent Price: $ 184.979996\nCurrent Cash: $ 106.96002100000001\nProfit: $ 106.96002100000001\nNumber of Transactins: 12\nCost of transactions: $ 60\n"
],
[
"rolmean = pd.Series(stock.Price).rolling(20).mean()\nstock['Rolling Mean'] = rolmean\nstock.head()",
"_____no_output_____"
],
[
"stock['First Difference'] = stock['Rolling Mean'] - stock['Rolling Mean'].shift(1)\ntest_stationarity(stock['First Difference'].dropna(inplace = False))",
"_____no_output_____"
],
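[
"# Sketch (an addition to the original notebook): for comparison, difference the raw\n# price series directly and test it for stationarity as well\nstock['Price Difference'] = stock['Price'] - stock['Price'].shift(1)\ntest_stationarity(stock['Price Difference'].dropna(inplace = False))",
"_____no_output_____"
],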
[
"fig = plt.figure(figsize = (12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(stock['Rolling Mean'].iloc[5:], lags=10, ax = ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(stock['Rolling Mean'].iloc[5:], lags=5, ax=ax2)\nplt.show()",
"_____no_output_____"
],
[
"optimal = auto_arima(stock['Rolling Mean'].dropna(inplace = False), start_p = 0, start_q = 0, test = 'adf', seasonal = True)\noptimal.summary()",
"/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:963: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.\n warn('Non-stationary starting autoregressive parameters'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:568: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n \"Check mle_retvals\", ConvergenceWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:975: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.\n warn('Non-invertible starting MA parameters found.'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:963: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.\n warn('Non-stationary starting autoregressive parameters'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:975: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.\n warn('Non-invertible starting MA parameters found.'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:568: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n \"Check mle_retvals\", ConvergenceWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:568: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n \"Check mle_retvals\", ConvergenceWarning)\n"
],
[
"model = sm.tsa.statespace.SARIMAX(stock['Rolling Mean'], trend = 'n', order = (4, 0, 0))\nresults = model.fit()\nresults.summary()",
"/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:963: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.\n warn('Non-stationary starting autoregressive parameters'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:568: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n \"Check mle_retvals\", ConvergenceWarning)\n"
],
[
"stock['Forecast'] = results.predict()\nstock['Target'] = stock['Rolling Mean'].shift(-1)",
"_____no_output_____"
],
[
"plt.scatter(stock.iloc[21:-1].Target, stock.iloc[21:-1].Forecast)\nplt.xlabel(\"True Values\")\nplt.ylabel(\"Predictions\")\nprint(\"Score: \", metrics.r2_score(stock.iloc[21:-1].Target, stock.iloc[21:-1].Forecast))\nprint(\"MSE:\", metrics.mean_squared_error(stock.iloc[21:-1].Target, stock.iloc[21:-1].Forecast))\nplt.show()",
"Score: 0.9984937509713085\nMSE: 0.35508330266433424\n"
],
[
"stock['Predicted Growth'] = stock[['Forecast', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 0 else 0, axis = 1)\nstock['Actual Growth'] = stock[['Target', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 0 else 0, axis = 1)",
"_____no_output_____"
],
[
"print(\"Model Resuls: \")\ncalculate_profit(stock.iloc[300:], 'Predicted Growth', -1)\nprint(\"\\nBest Case Results:\")\ncalculate_profit(stock.iloc[300:], 'Actual Growth', -1)",
"Model Resuls: \nCurrently Owning?: False\nLast Buying Price: $ 169.809998\nCurrent Price: $ 184.979996\nCurrent Cash: $ 0.569991999999985\nProfit: $ 0.569991999999985\nNumber of Transactins: 20\nCost of transactions: $ 100\n\nBest Case Results:\nCurrently Owning?: False\nLast Buying Price: $ 169.809998\nCurrent Price: $ 184.979996\nCurrent Cash: $ 20.939986999999974\nProfit: $ 20.939986999999974\nNumber of Transactins: 24\nCost of transactions: $ 120\n"
],
[
"def evaluate_model(truth, predictions, model = None, X = None):\n cm = confusion_matrix(truth, predictions)\n\n print('True Negative: ', cm[0, 0], '| False Positive: ', cm[0, 1])\n print('False Negative: ', cm[1, 0], '| True Positive: ', cm[1, 1], '\\n')\n\n sensitivity = cm[1, 1]/ (cm[1, 0] + cm[1, 1])\n specificity = cm[0, 0]/ (cm[0, 1] + cm[0, 0])\n\n print('Sensitivity (TP/ TP + FN): ', sensitivity)\n print('Specificity (TN/ TN + FP): ', specificity, '\\n')\n\n print('Accuracy: ', accuracy_score(truth, predictions, normalize = True))\n print('Precision: ', precision_score(truth, predictions))\n if model != None:\n print('Roc-Auc: ', roc_auc_score(truth, [x[1] for x in model.predict_proba(X)]))\n else:\n pass\n print('\\n')",
"_____no_output_____"
],
[
"evaluate_model(stock['Actual Growth'], stock['Predicted Growth'])",
"True Negative: 351 | False Positive: 5\nFalse Negative: 5 | True Positive: 147 \n\nSensitivity (TP/ TP + FN): 0.9671052631578947\nSpecificity (TN/ TN + FP): 0.9859550561797753 \n\nAccuracy: 0.9803149606299213\nPrecision: 0.9671052631578947\n\n\n"
],
[
"stock['Predicted Growth'] = stock[['Forecast', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 5 else 0 if x[0] - x[1] <= -5 else 2, axis = 1)\nstock['Actual Growth'] = stock[['Target', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 5 else 0 if x[0] - x[1] <= -5 else 2, axis = 1)",
"_____no_output_____"
],
[
"print(\"Model Resuls: \")\ncalculate_profit(stock.iloc[300:], 'Predicted Growth', -1)\nprint(\"\\nBest Case Results:\")\ncalculate_profit(stock.iloc[300:], 'Actual Growth', -1)",
"Model Resuls: \nCurrently Owning?: False\nLast Buying Price: $ 178.58999599999999\nCurrent Price: $ 184.979996\nCurrent Cash: $ -29.67999199999997\nProfit: $ -29.67999199999997\nNumber of Transactins: 2\nCost of transactions: $ 10\n\nBest Case Results:\nCurrently Owning?: False\nLast Buying Price: $ 178.58999599999999\nCurrent Price: $ 184.979996\nCurrent Cash: $ -29.67999199999997\nProfit: $ -29.67999199999997\nNumber of Transactins: 2\nCost of transactions: $ 10\n"
],
[
"profits = []\nfor i in range(1, 30):\n rolmean = pd.Series(stock.Price).rolling(i).mean()\n stock['Rolling Mean'] = rolmean\n model = sm.tsa.statespace.SARIMAX(stock['Rolling Mean'], trend='n', order=(4, 0, 0))\n results = model.fit()\n results.summary()\n stock['Forecast'] = results.predict()\n stock['Target'] = stock['Rolling Mean'].shift(-1)\n stock['Predicted Growth'] = stock[['Forecast', 'Price']].apply(lambda x: 1 if x[0] - x[1] >= 5 else 0 if x[0] - x[1] <= -5 else 2, axis = 1)\n calculate_profit(stock.iloc[300:], 'Predicted Growth', -1)",
"/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/sarimax.py:963: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.\n warn('Non-stationary starting autoregressive parameters'\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:568: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n \"Check mle_retvals\", ConvergenceWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/maxzhang/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |