max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
examples/stockquotes-old/phase1/stockmarket.py | brubbel/Pyro4 | 638 | 12794051 | <filename>examples/stockquotes-old/phase1/stockmarket.py
import random
class StockMarket(object):
def __init__(self, marketname, symbols):
self.name = marketname
self.symbolmeans = {}
for symbol in symbols:
self.symbolmeans[symbol] = random.uniform(20, 200)
self.aggregators = []
def generate(self):
quotes = {}
for symbol, mean in self.symbolmeans.items():
if random.random() < 0.2:
quotes[symbol] = round(random.normalvariate(mean, 20), 2)
for aggregator in self.aggregators:
aggregator.quotes(self.name, quotes)
def listener(self, aggregator):
self.aggregators.append(aggregator)
def symbols(self):
return self.symbolmeans.keys()
| 2.96875 | 3 |
setup.py | jvzantvoort/seeddms | 3 | 12794052 | <reponame>jvzantvoort/seeddms
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import io
import os
import re
setup_path = os.path.abspath(__file__)
setup_path_dir = os.path.dirname(setup_path)
exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read())
long_description = open(os.path.join(setup_path_dir, 'README.md')).read()
setup(
name='seeddms',
version=__version__,
description='SeedDMS REST API',
keywords='dms seeddms',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jvzantvoort/seeddms',
packages=find_packages(exclude=['docs', 'docs-src', 'tests']),
install_requires=['requests'],
license='MIT',
test_suite="tests",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
]
)
| 1.34375 | 1 |
atpbar/report.py | abitoun-42/atpbar | 72 | 12794053 | # <NAME> <<EMAIL>>
##__________________________________________________________________||
class ProgressReportComplementer:
"""Complement progress reports
Complement a progress report with the previous report for the same
task.
Parameters
----------
report : dict
A progress report, a dict with the following entries. The
`taskid` must always be given. The first report for a task
must include `done`, `total`, and `name`. The `first` and
`last` will be automatically determined if not given.
taskid : immutable
The unique task ID.
done : int, optional
The number of iterations done so far
total : int
The total iterations to be done
name : str
A name of the task. It will be used as the label on the
progress bars.
first : bool
`True` if the first report for the task. If not given,
automatically determined from `done`; `True` if `done` is
0, `False` otherwise
last : bool
`True` if the last report for the task. If not given,
automatically determined from `done` and `total`; `True`
if `done` equals `total`, `False` otherwise
"""
def __init__(self):
self.previous_reports = { }
self.volatile_fields = ('first', 'last')
def __call__(self, report):
taskid = report['taskid']
if taskid in self.previous_reports:
self._complement(taskid, report)
self._first(report)
self._last(report)
self._store(taskid, report.copy())
def _complement(self, taskid, report):
report_copy = report.copy()
report.clear()
report.update(self.previous_reports[taskid])
report.update(report_copy)
def _first(self, report):
if 'first' in report:
return
report['first'] = (report['done'] == 0)
def _last(self, report):
if 'last' in report:
return
report['last'] = (report['done'] >= report['total'])
def _store(self, taskid, report):
for k in self.volatile_fields:
report.pop(k, None)
self.previous_reports[taskid] = report
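# A minimal usage sketch (assumed calls, following the docstring above):
#
#     complement = ProgressReportComplementer()
#     report = {'taskid': 1, 'done': 0, 'total': 3, 'name': 'task'}
#     complement(report)   # adds first=True, last=False
#     report = {'taskid': 1, 'done': 3}
#     complement(report)   # restores total/name from the stored report; sets last=True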
##__________________________________________________________________||
| 3 | 3 |
serverlesspack/exceptions.py | Robinson04/serverlesspack | 1 | 12794054 | <gh_stars>1-10
from .utils import message_with_vars
class OutputDirpathTooLow(Exception):
def __init__(self, highest_found_directory: str, output_base_dirpath: str):
self.highest_found_directory = highest_found_directory
self.output_base_dirpath = output_base_dirpath
def __str__(self):
return message_with_vars(
message="The highest found directory found in the packages file is higher than the output_base_dirpath. "
"Please increase the output_base_dirpath to an higher directory.",
vars_dict={
'highest_found_directory': self.highest_found_directory,
'output_base_dirpath': self.output_base_dirpath
}
)
| 2.6875 | 3 |
bookstore/profiles/models.py | M0673N/bookstore | 0 | 12794055 | <filename>bookstore/profiles/models.py
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.safestring import mark_safe
from bookstore.accounts.models import BookstoreUser
import cloudinary.models as cloudinary_models
from bookstore.profiles.misc import list_of_countries
from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name
UserModel = get_user_model()
class Profile(models.Model):
first_name = models.CharField(max_length=200, blank=True)
last_name = models.CharField(max_length=200, blank=True)
biography = models.TextField(blank=True)
image = cloudinary_models.CloudinaryField(blank=True, resource_type='image')
country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44)
city = models.CharField(max_length=200, blank=True)
street_address = models.CharField(max_length=200, blank=True)
post_code = models.CharField(max_length=20, blank=True)
phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True)
is_author = models.BooleanField(default=False)
is_complete = models.BooleanField(default=False)
user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True)
def image_tag(self):
return mark_safe(f'<img src="{self.image.url}" width="150" height="150" />')
image_tag.short_description = 'Current Image'
def __str__(self):
return self.first_name + ' ' + self.last_name
class AuthorLike(models.Model):
author = models.ForeignKey(Profile, on_delete=models.CASCADE)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
class AuthorDislike(models.Model):
author = models.ForeignKey(Profile, on_delete=models.CASCADE)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
class AuthorReview(models.Model):
text = models.TextField()
date_posted = models.DateField(auto_now_add=True)
author = models.ForeignKey(Profile, on_delete=models.CASCADE)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
| 2.296875 | 2 |
cheetah_core/djangoapps/core/models.py | dota-2-cheetah/cheetah_core | 0 | 12794056 | <filename>cheetah_core/djangoapps/core/models.py
from django.db import models
# Create your models here.
class ValveDataAbstract(models.Model):
name = models.CharField('Name', max_length=50)
localized_name = models.CharField('Localized name', max_length=50)
nicknames = models.TextField('Nicknames', blank=True)
use_custom_image = models.BooleanField('Use custom image?', default=False)
custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True)
def __str__(self):
return self.name
class Meta:
abstract = True
| 2.265625 | 2 |
qork/easy.py | flipcoder/qork | 3 | 12794057 | #!/usr/bin/python
from collections import defaultdict
from qork.signal import Signal
from qork.reactive import *
APP = None
def qork_app(a=None):
global APP
if a is None:
return APP
APP = a
return APP
def cache(*args, **kwargs):
return APP.cache(*args, **kwargs)
def add(*args, **kwargs):
return APP.add(*args, **kwargs)
def find(*args, **kwargs):
return APP.world.find(*args, **kwargs)
def find_one(*args, **kwargs):
return APP.world.find(*args, one=True, **kwargs)
def remove(*args, **kwargs):
return APP.remove(*args, **kwargs)
def create(*args, **kwargs):
return APP.create(*args, **kwargs)
def clear():
return APP.scene.clear()
def play(*args, **kwargs):
return APP.play(*args, **kwargs)
# def music(fn):
# return APP.add(fn, loop=True)
| 2.0625 | 2 |
chap8/8-15/printing_functions.py | StewedChickenwithStats/Answers-to-Python-Crash-Course | 1 | 12794058 | def print_models(unprinted_designs, completed_models):
while unprinted_designs:
current_design = unprinted_designs.pop()
print("Printing model: " + current_design)
completed_models.append(current_design)
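# Example usage (illustrative values in the spirit of the book's exercise):
#     unprinted = ['iphone case', 'robot pendant', 'dodecahedron']
#     completed = []
#     print_models(unprinted, completed)
#     # completed -> ['dodecahedron', 'robot pendant', 'iphone case']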
| 2.859375 | 3 |
nymph/utils/third/doc_parse.py | smilelight/nymph | 1 | 12794059 | <reponame>smilelight/nymph
# -*- coding: utf-8 -*-
from typing import Dict, List
def is_breakpoint(item: dict):
# if item['text_feature'] != 'text':
if item['text_feature'] not in ['table', 'text', 'image']:
return True
if item['is_center'] is True:
return True
if item['is_bold'] is True:
return True
return False
def doc_split_fn(dataset: List[Dict]):
idx_list = []
for i, item in enumerate(dataset):
if is_breakpoint(item):
idx_list.append(i)
if 0 not in idx_list:
idx_list.insert(0, 0)
if len(dataset) not in idx_list:
idx_list.append(len(dataset))
return idx_list
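# Illustrative example (assumed input shape): if items 2 and 5 of `dataset`
# are breakpoints (e.g. centered or bold lines), doc_split_fn returns
# [0, 2, 5, len(dataset)]; consecutive index pairs delimit document sections.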
def doc_label_parse(labels: List[str]):
return labels
| 2.421875 | 2 |
src/Eighteenth Chapter/Exercise6.py | matthijskrul/ThinkPython | 0 | 12794060 | <reponame>matthijskrul/ThinkPython
# Rewrite the fibonacci algorithm without using recursion.
# Can you find bigger terms of the sequence?
# Can you find fib(200)?
import time
def fibonacci(n):
fibonaccinumber1 = 0
fibonaccinumber2 = 1
if n == 0: return fibonaccinumber1  # fib(0) = 0, not 1
if n > 1:
for i in range(2, n+1):
fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2
fibonaccinumber1 = fibonaccinumber2
fibonaccinumber2 = fibonaccinumber3
return fibonaccinumber2
t0 = time.perf_counter()
n = 9
result = fibonacci(n)
t1 = time.perf_counter()
print("fibonacci({0}) = {1}, ({2:.2f} secs)".format(n, result, t1 - t0))
| 4.21875 | 4 |
mathfun/primes/__init__.py | lsbardel/mathfun | 0 | 12794061 | <gh_stars>0
from .gcd import xgcd
from .prime_numbers import factors, is_prime, prime_factors
| 1.023438 | 1 |
src/nqdc/_authors.py | neuroquery/nqdc | 1 | 12794062 | <reponame>neuroquery/nqdc<filename>src/nqdc/_authors.py
"""Extracting list of authors from article XML."""
import pandas as pd
from lxml import etree
from nqdc._typing import BaseExtractor
from nqdc import _utils
class AuthorsExtractor(BaseExtractor):
"""Extracting list of authors from article XML."""
fields = ("pmcid", "surname", "given-names")
name = "authors"
def extract(self, article: etree.ElementTree) -> pd.DataFrame:
authors = []
pmcid = _utils.get_pmcid(article)
for author_elem in article.iterfind(
"front/article-meta/contrib-group/contrib[@contrib-type='author']"
):
author_info = {"pmcid": pmcid}
for part in [
"name/surname",
"name/given-names",
]:
elem = author_elem.find(part)
if elem is not None:
author_info[elem.tag] = elem.text
authors.append(author_info)
return pd.DataFrame(authors, columns=self.fields)
| 2.96875 | 3 |
src/vk_chat_bot/vk/manager.py | Dilik8/prof_net_dip | 0 | 12794063 | <gh_stars>0
import threading
import vk_api
import datetime as dt
import flag
from queue import Queue
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
from vk_api.utils import get_random_id
from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session
from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook
class VKGroupManage:
QUEUE = Queue()
COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. в чс',
'ну...давай позже 😔', 'а давай познакомимся 🐼'}
def __init__(self, vk_group_token, group_id, oauth_link):
self.vk = vk_api.VkApi(token=vk_group_token)
self.long_poll = VkBotLongPoll(self.vk, group_id=group_id)
self.vk_api = self.vk.get_api()
self.userapp_token = UserAppToken(session)
self.user_app = UserApp(session)
self.oauth_link = oauth_link
self.u_vk_api = None
def _get_firstname(self, user_id):
return self.vk_api.users.get(user_ids=user_id)[0]['first_name']
def _next(self, user_id, user_token, user_firstname) -> None:
usr_search = UserSearchList(user_id, session)
v_usr_cook = VkUserCook(user_token)
s_engine = SearchEngine(user_id, user_token)
random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine)
get_id = self.userapp_token.get_last_searched_id(user_id)
if get_id is not None:
usr_search.move_user_to_archive(get_id)
self.userapp_token.update_last_searched(user_id, random_id)
else:
self.userapp_token.update_last_searched(user_id, random_id)
self._ask_to_move_msg(user_id)
def _move_to_fav(self, user_id) -> None:
usr_search = UserSearchList(user_id, session)
get_id = self.userapp_token.get_last_searched_id(user_id)
if get_id is not None:
usr_search.move_user_to_favourite(get_id)
self.userapp_token.update_last_searched(user_id, None)
self._ask_to_move_msg(user_id)
def _move_to_black(self, user_id) -> None:
usr_search = UserSearchList(user_id, session)
get_id = self.userapp_token.get_last_searched_id(user_id)
if get_id is not None:
usr_search.move_user_to_black(get_id)
self.userapp_token.update_last_searched(user_id, None)
self._ask_to_move_msg(user_id)
# Messaging
def _send_msg(self, peer_id, message) -> None:
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,
random_id=get_random_id())
def _resend(self, peer_id, value: str):
message = f'Неверный формат, правильный формат: {value}'
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,
random_id=get_random_id())
def _send_msg_sign_up(self, peer_id, usr_name) -> None:
message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по кнопке снизу "sign up 📝" и ' \
f'выдайте необходимые права 🐼 после нажмите на зеленую кнопку "start" '
keyboard = VkKeyboard(one_time=False)
keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link)
keyboard.add_button('start', color=VkKeyboardColor.POSITIVE)
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),
random_id=get_random_id())
def _send_msg_signed_in(self, peer_id, firstname) -> None:
message = f'{firstname}, мы все еще не знакомы... давай познакомимся? 🐼\n' \
f'(нужно познакомится с пандой, чтобы перейти к поиску)'
keyboard = VkKeyboard(one_time=True)
keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE)
keyboard.add_line()
keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE)
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),
random_id=get_random_id())
def _send_bye(self, peer_id, usr_name) -> None:
message = f'{usr_name}, <NAME> вызвать меня сможете написав -> start или по кнопке из меню чата'
keyboard = VkKeyboard(one_time=True)
keyboard.add_button('start', color=VkKeyboardColor.SECONDARY)
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),
random_id=get_random_id())
def _unknown_command(self, peer_id, txt_msg) -> None:
message = f"неизвестная команда '{txt_msg}' 😞\nнапишите -> start"
keyboard = VkKeyboard(one_time=False)
keyboard.add_button('start', color=VkKeyboardColor.SECONDARY)
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),
random_id=get_random_id())
def _ask_relation_msg(self, peer_id) -> None:
message = ('Ваше семейное положение? Отправьте "/re" и цифру от 1 - 8\n\n1 - не женат/не замужем\n'
'2 - есть друг/есть подруга\n3 - помолвлен/помолвлена\n4 - женат/замужем\n5 - всё сложно\n'
'6 - в активном поиске\n7 - влюблён/влюблена\n8 - в гражданском браке\n\nпр. "/re 6"')
self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,
random_id=get_random_id())
def _ask_to_move_msg(self, peer_id) -> None:
keyboard = VkKeyboard(one_time=True)
keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY)
keyboard.add_line()
keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE)
keyboard.add_line()
keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE)
self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(),
random_id=get_random_id())
def _get_acquaintance(self, u_token):
user = VKinderUser(u_token).get_info()
if user['dob'] is None or len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014:
user['dob'] = None
if user['gender'] == 0:
user['gender'] = None
if user['relation'] == 0:
user['relation'] = None
self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'],
dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation'])
return True
def _check_new_usr_info(self, u_id):
usr = self.user_app.get_user(u_id)
usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation}
if usr_info['dob'] is None:
self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY (от 9 до 80 лет допускается)'
'\nпр. "/dob 15.7.1990" ')
return False
if usr_info['city'] is None:
self._send_msg(u_id, 'Откуда вы? в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город'
'\nпр. "/from 🇺🇦 Киев" или "/from 🇷🇺 Москва" или "/from BY Минск"')
return False
if usr_info['gender'] is None:
self._send_msg(u_id, 'Ваш пол?\n пр. "/gender 1" -> девушка, "/gender 2" -> парень')
return False
if usr_info['relation'] is None:
self._ask_relation_msg(u_id)
return False
self._ask_to_move_msg(u_id)
return True
def _re_check(self, u_id, u_token):
if self._check_new_usr_info(u_id):
self.userapp_token.update_step(u_id, 1)
t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True)
VKGroupManage.QUEUE.put(t)
while not VKGroupManage.QUEUE.empty():
VKGroupManage.QUEUE.get().start()
return True
return False
def _c_dob(self, u_id, answer) -> bool:
if '.' in answer:
num = answer.split('.')
if len(num) == 3:
d, m, y = num[0], num[1], num[2]
if d.isdigit() and m.isdigit() and y.isdigit():
if 1 <= int(d) <= 31 and 1 <= int(m) <= 12 and 1942 <= int(y) <= 2013:
self.user_app.update(u_id, answer, 'dob')
self._send_msg(u_id, 'я запомню вашу днюху ☺️')
return True
self._send_msg(u_id, 'Дата указана неверено')
return False
def _c_city(self, u_id, country, city) -> bool:
vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id))
self.u_vk_api = vk.get_api()
country_flag = flag.dflagize(f"{country.strip()}", subregions=True)
country = self.u_vk_api.database.getCountries(code=country_flag)
country_id = country['items'][0]['id']
if country_id != 0:
ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items']
self.user_app.update(u_id, ci[0]['id'], 'city')
self._send_msg(u_id, f'{country} {city} ☺️')
return True
self._send_msg(u_id, 'Страна/город указан неверено')
return False
def _c_gender(self, u_id, gender) -> bool:
if gender.isdigit() and int(gender) in range(1, 3):
self.user_app.update(u_id, int(gender), 'gender')
return True
self._send_msg(u_id, 'Неверный пол')
return False
def _c_relation(self, u_id, relation) -> bool:
if relation.isdigit() and int(relation) in range(1, 9):
self.user_app.update(u_id, int(relation), 'relation')
return True
self._send_msg(u_id, 'Семейное положение указан неверно')
return False
def _search_users(self, u_id, user_token):
usr_search = UserSearchList(u_id, session)
s_engine = SearchEngine(u_id, user_token)
if usr_search.check_users_existence() is None:
usr = self.user_app.get_user(u_id)
s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year,
gender=usr.gender, city=usr.city, relation=usr.relation)
def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine):
if usr_search_list.check_users_existence() is None:
self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓')
return None
r_usr = usr_search_list.select_random_row()
attach = usr_cook.get_user_photos(r_usr.vk_usr_id)
if len(attach) != 3:
usr_search_list.move_user_to_archive(r_usr.vk_usr_id)
return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine)
self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼')
self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]',
attachment=attach, random_id=get_random_id())
return r_usr.vk_usr_id
class VKLaunchGroup(VKGroupManage):
def start(self):
for event in self.long_poll.listen():
if event.type == VkBotEventType.MESSAGE_NEW:
user_id = event.obj['message']['peer_id']
user_firstname = self._get_firstname(user_id)
text_msg = event.obj['message']['text'].strip().lower()
print(f"New msg from {user_id}, text: {text_msg} ")
if text_msg not in VKLaunchGroup.COMMANDS and \
text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}:
self._unknown_command(user_id, text_msg)
else:
user_exist = self.userapp_token.check_user(user_id)
user_token = self.userapp_token.get_user_token(user_id)
step = self.userapp_token.get_step(user_id)
start = text_msg in {'start', 'начать'}
next_ = text_msg in {'next', 'свайп вправо'}
if start and user_exist is False:
self._send_msg_sign_up(user_id, user_firstname)
elif step == 0 and user_exist:
if start:
self._send_msg_signed_in(user_id, user_firstname)
elif text_msg == 'ну...давай позже 😔':
self._send_bye(user_id, user_firstname)
elif text_msg == 'а давай познакомимся 🐼':
self._get_acquaintance(user_token)
self._re_check(user_id, user_token)
elif text_msg.split()[0] == '/dob':
txt_c = len(text_msg.split()) == 2
if txt_c:
self._c_dob(user_id, text_msg.split()[1])
self._re_check(user_id, user_token)
else:
self._resend(user_id, '/dob D.M.YYYY')
elif text_msg.split()[0] == '/from':
txt_c = len(text_msg.split()) == 3
if txt_c:
self._c_city(user_id, text_msg.split()[1], text_msg.split()[2])
self._re_check(user_id, user_token)
elif text_msg.split()[0] == '/gender':
txt_c = len(text_msg.split()) == 2
if txt_c:
self._c_gender(user_id, text_msg.split()[1])
self._re_check(user_id, user_token)
else:
self._resend(user_id, '/gender 1 или /gender 2')
elif text_msg.split()[0] == '/re':
txt_c = len(text_msg.split()) == 2
if txt_c:
self._c_relation(user_id, text_msg.split()[1])
self._re_check(user_id, user_token)
else:
self._resend(user_id, '/re 1-8')
elif step == 1 and user_exist:
if start:
self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼')
self._ask_to_move_msg(user_id)
elif next_:
self._next(user_id, user_token, user_firstname)
self._ask_to_move_msg(user_id)
elif text_msg == 'доб. в избранное':
self._move_to_fav(user_id)
self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\n'
'идет следующий поиск...️')
self._next(user_id, user_token, user_firstname)
elif text_msg == 'доб. в чс':
self._move_to_black(user_id)
self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\n'
'идет следующий поиск...')
self._next(user_id, user_token, user_firstname)
if __name__ == '__main__':
pass
| 2.0625 | 2 |
docs/import.py | kikei/onsen | 0 | 12794064 | <reponame>kikei/onsen<filename>docs/import.py
from database.models import Onsen
import json
from django.utils import timezone
content = open("docs/points.json").read()
contents = content.split("\n")
contents = list(filter(lambda x: x != "", contents))
jsons = list(map(json.loads, contents))
def extract_char(c):
return c[c.find("泉質:")+3:]
mi = '未登録'
for j in jsons:
name = j["name"]
address = j["address"]
character = extract_char(j["description"])
latitude = j["location"][0]
longitude = j["location"][1]
onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude, longitude=longitude, publish_date=timezone.now(), modified_date=timezone.now())
onsen.save()
| 2.609375 | 3 |
addressapp/tests/api/test_geographyapi.py | AbhiyantrikTechnology/DentalHub-Backend | 1 | 12794065 | <reponame>AbhiyantrikTechnology/DentalHub-Backend
# -*- coding:utf-8 -*-
from django.contrib.auth.models import Permission
import pytest
from faker import Faker
from mixer.backend.django import mixer
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework_jwt.settings import api_settings
from django.test import TestCase
from userapp.models import User
from patientapp.models import Patient
from encounterapp.models import Encounter, History, Refer
from treatmentapp.models import Treatment
from addressapp.models import Geography, ActivityArea
pytestmark = pytest.mark.django_db
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
fake = Faker()
import re
class TestGeography(TestCase):
def test_list_geography(self):
client = APIClient()
# unauthorized access by user
response = client.get('/api/v1/geography')
assert response.status_code == 401, 'list geography'
user_obj = mixer.blend(User)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.get('/api/v1/geography')
assert response.status_code == 200, 'user can access'
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.get('/api/v1/geography')
assert response.status_code == 200, 'admin can access'
def test_post_geography(self):
patient_obj = mixer.blend(Patient)
encounter_obj = mixer.blend(Encounter,patient=patient_obj)
client = APIClient()
# unauthorized access by user
patient_obj = mixer.blend(Patient)
response = client.post('/api/v1/geography')
assert response.status_code == 401, 'Un authorized access denied.'
# authorized user
user_obj = User.objects.create(email=fake.email(),\
first_name=fake.name(),last_name=fake.name())
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.post('/api/v1/geography', \
{'city':fake.name(),'state':fake.name(),\
'country':fake.name(),'street_address':fake.name()},format='json')
assert response.status_code == 400, 'only admin can add'
# authorized user with admin
user_obj = User.objects.create(email=fake.email(),\
first_name=fake.name(),last_name=fake.name(),admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.post('/api/v1/geography', \
{'city':fake.name(),'state':fake.name(),\
'country':fake.name(),'street_address':"ktm"},format='json')
assert response.status_code == 200, 'geography added'
# serializers errors
user_obj = User.objects.create(email=fake.email(),\
first_name=fake.name(),last_name=fake.name(),admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.post('/api/v1/geography', \
{'city':fake.name(),'state':'',\
'country':fake.name(),'street_address':fake.name()},format='json')
assert response.status_code == 400, 'serializers errors'
# location already added
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
Geography.objects.create(city="ktm",state="ktm",street_address="ktm",country="nepal")
response = client.post('/api/v1/geography', \
{'city':"ktm",'state':"ktm",\
'country':"Nepal",'street_address':"ktm"},format='json')
assert response.status_code == 400, 'location already exists'
# authorized user
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
response = client.post('/api/v1/geography', \
{'city':fake.name(),'street_address':fake.name(),\
'state':fake.name(),'country':"Nepal"},format='json')
assert response.status_code == 400, 'street_address should contain only string'
class TestGeographyUpdate(TestCase):
def test_listupdate_geography(self):
client = APIClient()
# unauthorized access by user
response = client.get('/api/v1/geography')
assert response.status_code == 401, 'list geography'
# authorized access by admin
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.get('/api/v1/geography/'+str(geography_obj.id))
assert response.status_code == 200, 'admin can access'
# admin requesting a nonexistent geography id
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.get('/api/v1/geography/'+str(23656544654))
assert response.status_code == 204, 'content not found'
# authorized access by non-admin user
user_obj = mixer.blend(User)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.get('/api/v1/geography/'+str(geography_obj.id))
assert response.status_code == 400, 'only admin can access'
def test_post_geography(self):
patient_obj = mixer.blend(Patient)
encounter_obj = mixer.blend(Encounter,patient=patient_obj)
client = APIClient()
# unauthorized access by user
patient_obj = mixer.blend(Patient)
response = client.put('/api/v1/geography')
assert response.status_code == 401, 'Un authorized access denied.'
# authorized non-admin user
user_obj = mixer.blend(User)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.put('/api/v1/geography/'+str(geography_obj.id), \
{'city':fake.name(),'state':fake.name(),\
'country':fake.name(),'street_address':fake.name()},format='json')
assert response.status_code == 400, 'only admin can add'
# location already added
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj=Geography.objects.create(city="ktm",state="ktm",street_address="ktm",country="nepal")
response = client.put('/api/v1/geography/'+str(geography_obj.id), \
{'city':"ktm",'state':"ktm",\
'country':"Nepal",'street_address':"ktm"},format='json')
assert response.status_code == 400, 'location already exists'
# authorized user
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.put('/api/v1/geography/'+str(geography_obj.id), \
{'city':fake.name(),'street_address':"ktm",\
'state':fake.name(),'country':"Nepal"},format='json')
assert response.status_code == 200, 'only admin can edit'
# authorized user
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.put('/api/v1/geography/'+str(geography_obj.id), \
{'city':fake.name(),'street_address':fake.name(),\
'state':fake.name(),'country':"Nepal"},format='json')
assert response.status_code == 400, 'street_address should contain only string'
# authorized user
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.put('/api/v1/geography/'+str(geography_obj.id), \
{'city':fake.name(),'street_address':fake.name(),\
'state':fake.name(),'country':''},format='json')
assert response.status_code == 400, 'serializers errors'
# authorized user
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.put('/api/v1/geography/'+str(1165465456), \
{'city':fake.name(),'street_address':fake.name(),\
'state':fake.name(),'country':'Nepal'},format='json')
assert response.status_code == 204, 'content not found'
def test_delete_geography(self):
client = APIClient()
# unauthorized access by user
geography_obj = mixer.blend(Geography)
response = client.delete('/api/v1/geography/'+str(geography_obj.id))
assert response.status_code == 401, 'Permission not define'
# authorized access by admin
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.delete('/api/v1/geography/'+str(geography_obj.id))
assert response.status_code == 204, 'data delete'
# admin deleting a nonexistent geography
user_obj = mixer.blend(User,admin=True)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.delete('/api/v1/geography/'+str(326545))
assert response.status_code == 204, 'content not found'
# authorized access by non-admin user
user_obj = mixer.blend(User)
payload = jwt_payload_handler(user_obj)
token = jwt_encode_handler(payload)
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
geography_obj = mixer.blend(Geography)
response = client.delete('/api/v1/geography/'+str(geography_obj.id))
assert response.status_code == 400, 'only admin can delete'
| 1.984375 | 2 |
setup.py | satta/eupathtables | 0 | 12794066 | <reponame>satta/eupathtables
import glob
from setuptools import setup, find_packages
try:
import multiprocessing
except ImportError:
pass
setup(
name='eupathtables',
version='0.1',
description='Python interface for reading and converting EuPathDB flat file dumps',
packages = find_packages(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/satta/eupathtables',
test_suite='nose.collector',
tests_require=['nose >= 1.3'],
scripts=glob.glob('scripts/*'),
license='ISC',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
)
| 1.484375 | 1 |
lucasSequence.py | StokicDusan/LucasSequence | 0 | 12794067 | <gh_stars>0
# Lucas Sequence L is a sequence of numbers
# such that L(n) = L(n-1) + L(n-2)
from time import perf_counter
from doctest import testmod, NORMALIZE_WHITESPACE
import sys
class Stopwatch:
def __init__(self):
self.reset()
def start(self):
if not self.__running:
self.__start_time = perf_counter()
self.__running = True
else:
print('Stopwatch already running')
def stop(self):
if self.__running:
self.__elapsed += perf_counter()-self.__start_time
self.__running = False
else:
print('Stopwatch not running')
def reset(self):
self.__start_time = self.__elapsed = 0
self.__running = False
def elapsed(self):
if not self.__running:
return self.__elapsed
else:
print('Stopwatch must be stopped')
return None
def print_time(time:float) -> None:
print('\nElapsed: ', end="")
if time > 1.0:
elapsed = round(time, 3)
print('%.3f s' % elapsed)
elif time > 0.001:
elapsed = time*1000
elapsed = round(elapsed, 2)
print('%.2f ms' % elapsed)
else:
elapsed = time*1000000
elapsed = round(elapsed, 2)
print('%.2f µs' % elapsed)
def lucas_sequence(n0: int, n1: int, n2: int) -> None:
L0, L1 = n0, n1
if n2 >= 1:
print(L0, end=" ")
if n2 >= 2:
print(L1, end=" ")
for i in range(0, n2-2):
print(L0+L1, end=" ")
L0, L1 = L1, L0+L1
def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None:
timer = Stopwatch()
timer.start()
L0, L1 = n0, n1
if n2 >= 1:
print(L0, end=" ")
if n2 >= 2:
print(L1, end=" ")
for i in range(0, n2-2):
print(L0+L1, end=" ")
L0, L1 = L1, L0+L1
timer.stop()
print_time(timer.elapsed())
def lucas_sequence_last(n0: int, n1: int, n2: int) -> None:
L0, L1 = n0, n1
for i in range(0, n2-2):
L0, L1 = L1, L0+L1
print(L1, end=" ")
def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None:
timer = Stopwatch()
timer.start()
L0, L1 = n0, n1
for i in range(0, n2-2):
L0, L1 = L1, L0+L1
print(L1, end=" ")
timer.stop()
print_time(timer.elapsed())
def test_lucas_sequence():
"""
>>> lucas_sequence(0,0,6)
0 0 0 0 0 0
>>> lucas_sequence(2,1,10)
2 1 3 4 7 11 18 29 47 76
>>> lucas_sequence(1,1,12)
1 1 2 3 5 8 13 21 34 55 89 144
>>> lucas_sequence(2308,4261,5)
2308 4261 6569 10830 17399
>>> lucas_sequence(5,-20,6)
5 -20 -15 -35 -50 -85
>>> lucas_sequence_last(2,1,100)
489526700523968661124
"""
pass
if __name__ == "__main__":
if(sys.argv[3:]):
lucas_sequence(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
else:
testmod(optionflags=NORMALIZE_WHITESPACE)  # printed output ends with a trailing space, so normalize whitespace in doctest comparisons
| 3.65625 | 4 |
GageLogger.py | JakubGreen/Neapco-Wireless-Telemetry | 0 | 12794068 | # Created By: <NAME>
# Date: 7/16/2016
# Description: The program reads wirelessly transmitted data from multiple sensors and saves it on the local SD card.
# Usage: The program simultaneously reads multiple UDP streams and writes them to ".txt" files.
# The output files are formatted to be imported into the InField data analysis software.
import socket
import serial # import Serial Library
from numpy import array, ones, linalg # Import numpy
import sys
import os
from Tkinter import *
import tkMessageBox
from tkFileDialog import asksaveasfilename
import datetime
import thread
from functools import partial
import matplotlib.pyplot as plt
from multiprocessing import Process
n = chr(13) + chr(10) + ""
class SensorNetwork(Frame):
def __init__(self,master=None):
### List of process objects for parallel computing
self.processes = []
### List of bridges
self.bridges = []
###### Tkinter variables
self.ip = StringVar()
self.ip.set("0.0.0.0")
self.isLogging = BooleanVar()
###### GUI Initialization
Frame.__init__(self,master, bd = 10)
self.pack(side = TOP)
#self.wm_title("Feather Receiver")
self.topFrame = Frame(master=self, padx = 8,pady = 8, bd = 2, relief = GROOVE)
self.topFrame.pack(side = TOP, fill = X)
self.startButton = Button(self.topFrame, text = "Start Logging", command = self.startLogging, width = 18)
self.startButton.pack(side = LEFT)
self.stopButton = Button(self.topFrame, text = "Stop Logging", command = self.stopLogging, width = 18, state = DISABLED)
self.stopButton.pack(side = RIGHT)
Button(self.topFrame, text = "Multi-Plot", command = self.plotMultiple).pack(side = LEFT)
self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd = 2, relief = GROOVE)
self.bottomFrame.pack(side = BOTTOM, fill = X)
self.bridgeButton = Button(self.bottomFrame, text = "Add Bridge", command = self.addBridge, width = 18)
self.bridgeButton.pack(side = LEFT)
self.bridgeRemove = Button(self.bottomFrame, text = "Remove Bridge", command = self.removeBridge, width = 18)
self.bridgeRemove.pack(side = RIGHT)
self.addBridge() # Initialize with one bridge
Label(self.bottomFrame, text = "Neapco Components LLC: <NAME>\t2016", font = ("Helvetica","12")).pack(side = BOTTOM)
def addBridge(self):
a = Bridge(self.ip.get(),0, master=self) # Create new bridge object
a.x.pack(side = TOP) # Pack it to the top of the window
self.bridges.append(a) # Add the object to self.bridges
def removeBridge(self):
self.bridges.pop().x.pack_forget()
### Simultaneously starts logging for all selected bridges
def startLogging(self):
if not tkMessageBox.askyesno("Start Logging","Are you sure?\nFiles may be overwritten"):
return
self.startButton.configure(state = DISABLED)
self.stopButton.configure(state = NORMAL)
for b in self.bridges: ### Loop through list of sensors
if b.checkVar.get(): ### if box is checked
p = Process(target = b.startLogging)
self.processes.append(p)
p.start()
def stopLogging(self):
print ("Stopping Data Collection")
self.startButton.configure(state = NORMAL)
self.stopButton.configure(state = DISABLED)
for p in self.processes: ### Iterate through list of process objects
p.terminate() ### Terminate each process
p.join()
def plotMultiple(self):
for b in self.bridges:
if b.checkVar.get():
xy = b.singlePlot(False) ### Call the singlePlot method for each instance
plt.plot(xy[0],xy[1]) ### Show the plot
plt.xlabel("Time (microseconds)")
plt.ylabel("Torque (inch-pounds)")
plt.title("Time vs. Torque")
plt.show()
class Bridge():
stringFormat = "{} \t {}"
def __init__(self,ip,port,master):
###### Tkinter Varibales
self.x = Frame()
self.isLogging = BooleanVar()
self.checkVar = IntVar()
self.portVar = IntVar()
self.portVar.set(port)
self.filePathVar = StringVar()
###### Variables
self.ip = ip
###### Interface Initialization
Frame.__init__(self.x,master, bd = 2, padx = 3, pady = 3)
self.x.pack(side=LEFT)
self.createWidgets()
###### Linear calibration coefficients
###### Calibrated = mSlope * Uncalibrated + yIntercept
self.mSlope = DoubleVar()
self.mSlope.set(1)
self.yIntercept = DoubleVar(0)
self.pointCount = 0
self.bitPoints = []
self.torquePoints = []
self.isFirstCalib = True
self.bitEntryList = []
self.torqueEntryList = []
self.pointList = []
#### Starts Writing to File
def startLogging(self):
print('Sampling system on Port: ' + str(self.portVar.get()))
self.isLogging.set(True)
### Network Connection
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((self.ip, (self.portVar.get())))
### File Setup
fileLog = open(self.filePathVar.get(), "wb")
### Necessary formatting for InField compatibility
fileLog.write("DM_TestTitle=" + n)
fileLog.write(str(self.file_path) + n + 'Program Start Time: ' + str(datetime.datetime.now()) + n)
fileLog.write("Calibration Values: Slope = " + str(self.mSlope.get()) + ", Y-Intercept = " + str(self.yIntercept.get()) + n)
fileLog.write("DM_Operator=" + n)
fileLog.write("DM_NumLogChans=2" + n)
fileLog.write("DM_NumDataModes=1" + n)
fileLog.write("DM_LogicalChan=1" + n)
fileLog.write("DM_ChanType=SEQUENTIAL" + n)
fileLog.write("DM_ChanName=1" + n)
fileLog.write("DM_NumDims=2" + n)
fileLog.write("DM_DataMode=1" + n)
fileLog.write("DM_DataModeType=TIMHIS" + n)
fileLog.write("DM_AxisLabel.Dim1=[]" + n)
fileLog.write("DM_AxisLabel.Dim2=Time" + n)
fileLog.write("DM_AxisUnits.Dim1=[]" + n)
fileLog.write("DM_AxisUnits.Dim2=us" + n)
fileLog.write("DM_LogicalChan=2" + n)
fileLog.write("DM_ChanType=SEQUENTIAL" + n)
fileLog.write("DM_ChanName=1" + n)
fileLog.write("DM_NumDims=2" + n)
fileLog.write("DM_DataMode=1" + n)
fileLog.write("DM_DataModeType=TIMHIS" + n)
fileLog.write("DM_AxisLabel.Dim1=[]" + n)
fileLog.write("DM_AxisLabel.Dim2=Torque" + n)
fileLog.write("DM_AxisUnits.Dim1=[]" + n)
fileLog.write("DM_AxisUnits.Dim2=in-lb" + n)
fileLog.write("DM_Start=" + n)
isFirst = True ### Boolean to track start time (time offset)
timeOffset = 0
prevTime = 0
prevAdjusted = 0
while True: ### Read packets until told to stop
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
packetSplit = data.decode('utf-8')
lineSplit = packetSplit.split('\n')
for line in lineSplit:
fields = line.split(',')
calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1)
if isFirst:
timeOffset = (-1)*int(fields[0]) # Take the time on the very first packet and store it in to timeOffset
isFirst = False
if(int(fields[0]) < prevTime): # If the processor clock has overflowed
timeOffset = prevAdjusted # Then edit the time offset value
adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset
fileLog.write("{:.6f}".format(float(adjustedTime)/1000000) + '\t' + str(calibratedData) + n)
prevTime = int(fields[0])
prevAdjusted = adjustedTime
def createWidgets(self):
check = Checkbutton(self.x,text = "Include",variable = self.checkVar)
check.pack(side=LEFT)
L1 = Label(self.x, text = " PORT")
L1.pack(side=LEFT)
portEntry = Entry(self.x, width = 5, textvariable = self.portVar)
portEntry.pack(side=LEFT)
L1 = Label(self.x, text = " File")
L1.pack(side=LEFT)
fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get())
fileEntry.pack(side=LEFT)
browseButton = Button(self.x, command = self.saveAs, text = "Browse...")
browseButton.pack(side = LEFT)
calibrateButton = Button(self.x, command = self.calibrate, text = "Calibrate")
calibrateButton.pack(side = LEFT)
Button(self.x, command = partial(self.singlePlot,True), text = "Plot").pack(side = LEFT)
def calibrate(self):
#if len(self.pointList) is not 0:
t = Toplevel(self.x) # Open window
t.wm_title("PORT: " + str(self.portVar.get()) + " Calibration")
a = Frame(t)
a.pack(side = LEFT)
b = Frame(t)
b.pack(side = RIGHT)
c = Frame(a)
c.pack(side = TOP)
d = Frame(a)
d.pack(side = BOTTOM)
Label(c, text = "Bit Value", padx = 15).grid(column = 0,row = 0)
Label(c, text = "Torque (in-lbs)", padx = 15).grid(column = 2, row = 0)
if len(self.pointList) == 0: # If the list of calibration points is empty
for i in xrange(3):
self.addPoint(a)
else:
tempList = self.pointList # Store points in temporary list
self.pointList = [] # Empty out list
for x in tempList: # Copy points over
temp = calibrationPoint(a)
temp.bitValue.set(x.bitValue.get())
temp.torqueValue.set(x.torqueValue.get())
self.pointList.append(temp)
Button(master = d, command = partial(self.addPoint,a), text = "Add Point").pack(side = LEFT,fill = X, padx = 10)
Button(master = d, command = partial(self.removePoint), text = "Remove Point").pack(side = LEFT, fill = X, padx = 10)
Button(master = b, command = partial(self.linReg), text = "Calibrate!").pack(side = TOP)
Label(b, text = "Slope", padx = 15).pack(side = TOP)
Entry(b, textvariable = self.mSlope).pack(side = TOP)
Label(b, text = "Y Intercept", padx = 15).pack(side = TOP)
Entry(b, textvariable = self.yIntercept).pack(side = TOP)
Button(b, command = partial(self.exitWindow,t), text = "OK").pack(side = BOTTOM)
def singlePlot(self, show):
f = open(self.filePathVar.get())
content = f.readlines()
time = []
torque = []
counter = 0
for line in content: ### Find which line the data starts on
counter = counter + 1
if line.find("DM_Start=") != -1:
break
for x in xrange(counter,len(content)-1): ### Starting on the line number found from previous loop
y = content[x].split("\t")
time.append(y[0])
torque.append(y[1])
if show:
plt.plot(time,torque)
plt.xlabel("Time (microseconds)")
plt.ylabel("Torque (inch-pounds)")
plt.title("Time vs. Torque")
plt.show()
return (time,torque)
def saveAs(self):
print 'Please Select File:'
self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')])
self.filePathVar.set(self.file_path)
def addPoint(self, frame):
x = calibrationPoint(frame)
self.pointList.append(x)
def removePoint(self):
self.pointList.pop().pack_forget()
# Used for debugging only
def printEntry(self):
for x in self.pointList:
print("bit value: "+ str(x.bitValue.get()))
print("Torque Value value: "+ str(x.torqueValue.get()))
# Finds slope and y intercept from calibration point cloud
def linReg(self):
temp1 = []
temp2 = []
for x in self.pointList:
temp1.append(x.bitValue.get())
temp2.append(x.torqueValue.get())
A = array([temp1,ones(len(temp1))])
B = array([temp2])
w = linalg.lstsq(A.T,B.T)[0]
m = round(float(w[0]),3)
y = round(float(w[1]),3)
self.mSlope.set(m)
self.yIntercept.set(y)
# Exits a top level window
def exitWindow(self, frame):
frame.withdraw()
def reopenWindow(self, frame):
frame.update()
frame.reiconify()
class calibrationPoint(Frame) :
def __init__(self, master):
self.bitValue = DoubleVar(0)
self.torqueValue = DoubleVar(0)
Frame.__init__(self,master)
self.pack(side = TOP)
self.createWidgets()
def createWidgets(self) :
x = Entry(self, textvariable = self.bitValue, width = 8)
x.pack(side = LEFT, padx = 10, pady = 2)
y = Entry(self, textvariable = self.torqueValue, width = 8)
y.pack(side = LEFT, padx = 10, pady = 2)
return
###### Running code
if __name__ == '__main__':
root = Tk()
root.wm_title("Gage Logger")
app = SensorNetwork(master=root)
app.mainloop()
| 2.609375 | 3 |
src/neocities_sync/__init__.py | kugland/neocities-sync | 2 | 12794069 | """Sync local directories with neocities.org sites."""
import os
import sys
from . import cmdline
from . import local
from .config import load_config_file
from .ignore_files import IgnoreFiles
from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info)
from .neocities import Neocities
from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions
from .utils import Pushd
def main():
"""Program entry-point."""
cmdline_opts = cmdline.parse(sys.argv[1:])
if cmdline_opts.quietness > 0:
for _ in range(cmdline_opts.quietness):
decrease_verbosity()
elif cmdline_opts.quietness < 0:
for _ in range(-cmdline_opts.quietness):
increase_verbosity()
try:
conf = load_config_file(cmdline_opts.config_file)
except FileNotFoundError:
fatal(f'Config file "{cmdline_opts.config_file}" not found. Run again with "--help" for more info.')
exit(1)
for site, site_conf in conf.items():
client = Neocities(site_conf.api_key)
with Pushd(os.path.expanduser(site_conf.root_dir)):
info(f'Starting sync for site "{site}".')
info("Listing local file tree...")
local_filetree = local.filetree(".")
local_filetree = IgnoreFiles(site_conf).filter(local_filetree)
info(
f"Local file tree has {local_filetree.number_of_files()} file(s)"
f" and {local_filetree.number_of_directories()} dir(s)."
)
info("Fetching remote file tree...")
remote_filetree = client.list()
info(
f"Remote file tree has {remote_filetree.number_of_files()}"
f" file(s) and {remote_filetree.number_of_directories()} dir(s)."
)
info("Comparing file trees...")
applied_actions = 0
for action in sync_actions(local_filetree, remote_filetree):
try:
if isinstance(action, UpdateRemote):
info(f'Updating remote file "{action.path}": {action.reason}.')
if not cmdline_opts.dry_run:
client.upload(action.path)
applied_actions += 1
elif isinstance(action, DeleteRemote):
info(f'Deleting remote file "{action.path}": {action.reason}.')
if not cmdline_opts.dry_run:
client.delete(action.path)
applied_actions += 1
elif isinstance(action, DoNothing):
debug(f'Skipping "{action.path}": {action.reason}.')
else:
raise RuntimeError(f"Unknown action {action.__class__.__name__}.")
except Exception as e: # noqa: B902
error(f"Error while syncing: {e}")
exit(1)
if not cmdline_opts.dry_run:
info(f"Applied {applied_actions} action(s).")
else:
info(f"Would apply {applied_actions} action(s).")
if site_conf.remove_empty_dirs:
info("Searching for empty directories...")
remote_filetree = client.list()
empty_directories = remote_filetree.list_empty_directories()
info(f"Found {len(empty_directories)} empty dir(s).")
for empty_dir in sorted(empty_directories, reverse=True):
info(f'Deleting remote empty directory "{empty_dir}"')
if not cmdline_opts.dry_run:
client.delete(empty_dir)
info(f'Finished sync for site "{site}".')
if __name__ == "__main__":
main()
| 2.421875 | 2 |
happyly/_deprecations/utils.py | alex-tsukanov/cargopy | 5 | 12794070 | import warnings
from typing import Type, Union
def will_be_removed(
deprecated_name: str,
use_instead: Union[str, Type],
removing_in_version: str,
stacklevel=2,
):
new_class_name = (
use_instead.__name__ # type: ignore
if isinstance(use_instead, Type) # type: ignore
else use_instead
)
warnings.warn(
f"Please use {new_class_name} instead, "
f"{deprecated_name} will be removed in happyly v{removing_in_version}.",
DeprecationWarning,
stacklevel=stacklevel,
)
| 2.46875 | 2 |
Python/Topics/Match object and flags/Something's missing/main.py | drtierney/hyperskill-problems | 5 | 12794071 | import re
string = input()
template = r'never gonna let you down...'
match = re.match(template, string, flags=re.IGNORECASE)
| 2.703125 | 3 |
appreg/utils.py | acdh-oeaw/dar | 0 | 12794072 | <reponame>acdh-oeaw/dar
from django.conf import settings
def populate_webapp(webpage_object, metadata):
""" parses a metadata_dict and populates a webpage object with the parsed data
:param webpage_object: An instance of the class appreg.models.WebApp
:param metadata: A dictionary providing string for the following keys ["title",\
"subtitle", "author", "description", "purpose_en", "gitbub", "app_type", "base_tech",\
"framework"]
:return: An instance of the class appreg.models.WebApp.
"""
try:
webpage_object.title = metadata['title']
except KeyError:
webpage_object.title = 'no info provided'
try:
webpage_object.subtitle = metadata['subtitle']
except KeyError:
webpage_object.subtitle = 'no info provided'
try:
webpage_object.author = metadata['author']
except KeyError:
webpage_object.author = 'no info provided'
try:
webpage_object.description = metadata['description']
except KeyError:
webpage_object.description = 'no info provided'
try:
webpage_object.purpose_en = metadata['purpose_en']
except KeyError:
webpage_object.purpose_en = 'no info provided'
try:
webpage_object.git_url = metadata['github']
except KeyError:
webpage_object.git_url = 'no info provided'
try:
webpage_object.app_type = metadata['app_type']
except KeyError:
webpage_object.app_type = 'no info provided'
try:
webpage_object.base_tech = metadata['base_tech']
except KeyError:
webpage_object.base_tech = 'no info provided'
try:
webpage_object.framework = metadata['framework']
except KeyError:
webpage_object.framework = 'no info provided'
try:
webpage_object.version = metadata['version']
except KeyError:
webpage_object.version = 'no info provided'
try:
webpage_object.last_commit = metadata['last_commit']
except KeyError:
webpage_object.last_commit = 'no info provided'
try:
webpage_object.title_img = metadata['title_img']
except KeyError:
webpage_object.title_img = settings.DEFAULT_TITLE_IMG
try:
webpage_object.project_logo = metadata['project_logo']
except KeyError:
webpage_object.project_logo = settings.DEFAULT_LOGO
try:
webpage_object.public = metadata['public']
except KeyError:
webpage_object.public = 'restricted'
return webpage_object
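# Minimal usage sketch (hypothetical metadata values; any key missing from the
# dict falls back to 'no info provided' or the configured default image/logo):
#
#     app = populate_webapp(WebApp(), {'title': 'My App', 'app_type': 'flask'})
#     app.save()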
| 2.34375 | 2 |
train.py | LeeDoYup/DeblurGAN-tf | 64 | 12794073 | from __future__ import print_function
import time
import os
import sys
import logging
import json
import tensorflow as tf
import numpy as np
import cv2
import data.data_loader as loader
from models.cgan_model import cgan
from models.ops import *
os.system('http_proxy_on')
def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300):
'''
Return the linearly decayed learning rate.
It reaches 0 at end_step.
'''
decay_period = end_step - start_step
step_decay = (initial-0.0)/decay_period
update_step = max(0, step-start_step)
current_value = max(0, initial - (update_step)*step_decay)
return current_value
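# Hand-checked examples with the defaults above:
#   linear_decay(0.0001, step=150) -> 0.0001   (decay starts after step 150)
#   linear_decay(0.0001, step=225) -> 0.00005  (halfway through the decay window)
#   linear_decay(0.0001, step=300) -> 0.0      (reaches zero at end_step)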
def train(args):
#assume there is a batch data pair:
dataset = loader.read_data_path(args.data_path_train, name=args.data_name)
num_dataset = len(dataset)
num_batch = num_dataset/args.batch_num
sess = tf.Session()
model = cgan(sess, args)
model.build_model()
model.sess.run(tf.global_variables_initializer())
model.load_weights(args.checkpoint_dir)
for iter in range(args.epoch):
learning_rate = linear_decay(0.0001, iter)
for i, data in enumerate(dataset):
blur_img, real_img = loader.read_image_pair(data,
resize_or_crop = args.resize_or_crop,
image_size=(args.img_h, args.img_w))
feed_dict = {model.input['blur_img']: blur_img,\
model.input['real_img']: real_img,\
model.learning_rate: learning_rate}
loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict)
logging.info('%d epoch, %d batch, Generator Loss: %f, adv loss: %f, perceptual_loss: %f',\
iter, i, loss_G, adv_loss, perceptual_loss)
# Train the discriminator (several iterations per generator step)
for _ in range(args.iter_disc):
loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor)
logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp)
if (iter+1) % 50 == 0 or iter == (args.epoch-1):
model.save_weights(args.checkpoint_dir, model.global_step)
logging.info("[!] test started")
dataset = loader.read_data_path(args.data_path_test, name=args.data_name)
for i, data in enumerate(dataset):
if not os.path.exists('./test_result'):
os.mkdir('./test_result')
blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop,
image_size=(args.img_h, args.img_w))
feed_dict_G = {model.input['blur_img']: blur_img}
G_out = model.G_output(feed_dict=feed_dict_G)
cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.)
cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.)
cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.)
logging.info("Deblur Image is saved (%d/%d) ", i, len(dataset))
logging.info("[*] test done")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--iter_gen', type=int, default=1)
parser.add_argument('--iter_disc', type=int, default=5)
parser.add_argument('--batch_num', type=int, default=1)
parser.add_argument('--epoch', type=int, default=300)
parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/')
parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/')
parser.add_argument('--model_name', type=str, default='DeblurGAN.model')
parser.add_argument('--summary_dir', type=str, default='./summaries/')
parser.add_argument('--data_name', type=str, default='GOPRO')
parser.add_argument('--tf_image_monitor', type=bool, default=False)
parser.add_argument('--resize_or_crop', type=str, default='resize')
parser.add_argument('--img_h', type=int, default=256)
parser.add_argument('--img_w', type=int, default=256)
parser.add_argument('--img_c', type=int, default=3)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
log_format = '[%(asctime)s %(levelname)s] %(message)s'
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level, format=log_format, stream=sys.stderr)
logging.getLogger("DeblurGAN_TRAIN.*").setLevel(level)
train(args)
| 2.609375 | 3 |
cli/src/ssm_client.py | eyalstoler/ssm-simple-cli | 0 | 12794074 | <reponame>eyalstoler/ssm-simple-cli
import boto3
class SSMClient:
def __init__(self, config, **client_kwargs):
self.session = boto3.Session(**client_kwargs)
self.ssm_config = config
self.profile_name = 'default'
def client(self):
return self.session.client('ssm')
def get(self, name):
return self.client().get_parameter(
Name=name,
WithDecryption=self.ssm_config.should_auto_decrypt_secret_value()
)['Parameter']['Value']
def describe(self, parameters_path=''):
if parameters_path:
params = self.client().describe_parameters(ParameterFilters=[
{
'Key': 'Path',
'Values': [
parameters_path,
]
},
])['Parameters']
else:
params = self.client().describe_parameters()['Parameters']
return [key['Name'] for key in params]
def put(self, key, value, description):
self.client().put_parameter(
Name=key,
Description=description,
Value=value,
Type='SecureString' if self.ssm_config.should_auto_encrypt_secret_value() else 'String'
)
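# Illustrative usage sketch for SSMClient (hypothetical parameter names; assumes
# configured AWS credentials and a config object exposing the
# should_auto_encrypt/decrypt_secret_value() methods used above):
#   client = SSMClient(ssm_config)
#   client.put('/app/db_password', 's3cret', 'database password')
#   client.get('/app/db_password')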
| 2.25 | 2 |
semversioner/core.py | mvanbaak/semversioner | 0 | 12794075 | import os
import sys
import json
import click
import datetime
from distutils.version import StrictVersion
from jinja2 import Template
ROOTDIR = os.getcwd()
INITIAL_VERSION = '0.0.0'
DEFAULT_TEMPLATE = """# Changelog
Note: version releases in the 0.x.y range may introduce breaking changes.
{% for release in releases %}
## {{ release.id }}
{% for data in release.data %}
- {{ data.type }}: {{ data.description }}
{% endfor %}
{% endfor %}
"""
class Semversioner:
def __init__(self, path=ROOTDIR):
semversioner_path_legacy = os.path.join(path, '.changes')
semversioner_path_new = os.path.join(path, '.semversioner')
semversioner_path = semversioner_path_new
deprecated = False
if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new):
deprecated = True
semversioner_path = semversioner_path_legacy
if not os.path.isdir(semversioner_path):
os.makedirs(semversioner_path)
next_release_path = os.path.join(semversioner_path, 'next-release')
if not os.path.isdir(next_release_path):
os.makedirs(next_release_path)
self.path = path
self.semversioner_path = semversioner_path
self.next_release_path = next_release_path
self.deprecated = deprecated
def is_deprecated(self):
return self.deprecated
def add_change(self, change_type, description):
"""
Create a new changeset file.
The method creates a new json file in the ``.semversioner/next-release/`` directory
with the type and description provided.
Parameters
-------
change_type (str): Change type. Allowed values: major, minor, patch.
description (str): Change description.
Returns
-------
path : str
Absolute path of the file generated.
"""
parsed_values = {
'type': change_type,
'description': description,
}
filename = None
while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))):
filename = '{type_name}-{datetime}.json'.format(
type_name=parsed_values['type'],
datetime="{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()))
with open(os.path.join(self.next_release_path, filename), 'w') as f:
f.write(json.dumps(parsed_values, indent=2) + "\n")
return {
'path': os.path.join(self.next_release_path, filename)
}
def generate_changelog(self):
"""
Generates the changelog.
The method generates the changelog based on the template file defined
in ``DEFAULT_TEMPLATE``.
Returns
-------
str
Changelog string.
"""
releases = []
for release_identifier in self._sorted_releases():
with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f:
data = json.load(f)
data = sorted(data, key=lambda k: k['type'] + k['description'])
releases.append({'id': release_identifier, 'data': data})
return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases)
def release(self):
"""
Performs the release.
The method performs the release by taking everything in ``next-release`` folder
and aggregating all together in a single JSON file for that release (e.g ``1.12.0.json``).
The JSON file generated is a list of all the individual JSON files from ``next-release``.
After aggregating the files, it removes the ``next-release`` directory.
Returns
-------
previous_version : str
Previous version.
new_version : str
New version.
"""
changes = []
next_release_dir = self.next_release_path
for filename in os.listdir(next_release_dir):
full_path = os.path.join(next_release_dir, filename)
with open(full_path) as f:
changes.append(json.load(f))
if len(changes) == 0:
click.secho("Error: No changes to release. Skipping release process.", fg='red')
sys.exit(-1)
current_version_number = self.get_version()
next_version_number = self._get_next_version_number(changes, current_version_number)
click.echo("Releasing version: %s -> %s" % (current_version_number, next_version_number))
release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number)
click.echo("Generated '" + release_json_filename + "' file.")
with open(release_json_filename, 'w') as f:
f.write(json.dumps(changes, indent=2, sort_keys=True))
click.echo("Removing '" + next_release_dir + "' directory.")
for filename in os.listdir(next_release_dir):
full_path = os.path.join(next_release_dir, filename)
os.remove(full_path)
os.rmdir(next_release_dir)
return {
'previous_version': current_version_number,
'new_version': next_version_number
}
def get_version(self):
"""
Gets the current version.
"""
releases = self._sorted_releases()
if len(releases) > 0:
return releases[0]
return INITIAL_VERSION
def _sorted_releases(self):
files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))]
releases = list(map(lambda x: x[:-len('.json')], files))
releases = sorted(releases, key=StrictVersion, reverse=True)
return releases
def _get_next_version_number(self, changes, current_version_number):
release_type = sorted(list(map(lambda x: x['type'], changes)))[0]
return self._increase_version(current_version_number, release_type)
def _increase_version(self, current_version, release_type):
"""
Returns a string like '1.0.0'.
"""
# Convert to a list of ints: [1, 0, 0].
version_parts = list(int(i) for i in current_version.split('.'))
if release_type == 'patch':
version_parts[2] += 1
elif release_type == 'minor':
version_parts[1] += 1
version_parts[2] = 0
elif release_type == 'major':
version_parts[0] += 1
version_parts[1] = 0
version_parts[2] = 0
return '.'.join(str(i) for i in version_parts)
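    # Illustrative results for _increase_version (hypothetical inputs):
    #   _increase_version('1.2.3', 'patch') -> '1.2.4'
    #   _increase_version('1.2.3', 'minor') -> '1.3.0'
    #   _increase_version('1.2.3', 'major') -> '2.0.0'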
| 2.21875 | 2 |
toolbox/web.py | korn-alex/toolbox | 0 | 12794076 | import requests as rq
from sys import stdout
from pathlib import Path
import re
import os
class Downloader:
"""
class to manage downloading url links
"""
def __init__(self, *args, session=None): # creates a session
self.cwd = Path.cwd()
self.src_path = Path(__file__)
if not session:
self.session = rq.Session()
else:
self.session = session
def _print_progress(self, current_bytes, size):
bar = self._get_bar(current_bytes / size)
output = f'\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB'
stdout.write(output)
# stdout.flush()
def _get_bar(self, progress):
"""
progress must be between 0 and 1\n
Returns the bar with current progress as a string
"""
FULL_BLOCKLENGTH = 32
fillblock = '█'
if progress > 1:
progress = 1
blocks = int(progress / (1/FULL_BLOCKLENGTH))
bar_start = fillblock*blocks
bar_end = (33 - len(bar_start))*'_'+'|'
bar_percent = f' {progress*100:0.2f} % '
text = bar_start+bar_end+bar_percent
return text
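    # Illustrative rendering of _get_bar: _get_bar(0.5) yields 16 filled blocks
    # followed by 17 underscores, a '|' cap, and the text ' 50.00 % '.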
def _make_name(self, url_path: Path, name_in: str):
"""
Parses the name and returns a writebale name
"""
        # in case it's a number and not None
        if name_in is not None and not isinstance(name_in, str):
            name_in = str(name_in)
try:
name_in[0] # if its empty it raises exception
# clean_name = re.search(r'\w+',name_in).group() # parsing name, only alphanumeric, no whitespace
# name = re.split(r'[.].+$',name_in)[0] # name without extension
name_parts = name_in.split('.') # name without extension
if len(name_parts) > 1:
name_noext = '.'.join(name_parts[:-1]) # joining together without extension
else:
name_noext = name_parts[0]
clean_name = ' '.join(re.findall(r'\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace
clean_name[0] # empty testing
except :
print('invalid name, taking name from url')
name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it
return name
try:
extension = re.search(r'(?<=[.])\w+$', name_in).group() # matching only extension after last "."
# extension = name.split('.')[-1] # matching only extension after last "."
except:
extension = None
if extension:
name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in the name
else:
name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it
extension = re.search(r'(?<=[.])\w+$', name).group() # matching only extension after last "."
name_path = Path(f'{clean_name}.{extension}') # extension from url
return name_path.name
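    # Illustrative example for _make_name (hypothetical inputs): for a url
    # ending in 'photo.jpeg?auto=compress' and name_in 'my pic', the result is
    # 'my pic.jpeg' -- the extension is recovered from the url.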
    def download(self, url, d_path=None, name_out=None, printprogress=False):
"""
Downloads from url
`d_path`: Default download path is current working directory.
`name_out`: Default name is the tail of the url address,
can take in a name with or without extension,
takes extension from url if not specified.
`printprogress`: Prints current download progress in terminal.
"""
url_path = Path(url)
#download_path = self.cwd / url_path.name if not d_path else Path(d_path)
name_out = self._make_name(url_path, name_out)
if not d_path:
# download_path = self.src_path.parent
download_path = self.cwd
else:
download_path = Path(d_path)
# os.chdir(download_path)
# making file path
save_file = download_path / name_out
# checking if file already is there
if save_file.exists():
print('skipping', save_file.name)
return
r = self.session.get(url)
# size = float(r.headers['content-length'])
contentlength = r.headers.get('content-length')
if contentlength is not None:
size = float(contentlength)
else:
size = 1
with open(save_file, 'wb') as fd:
tmp = 0
            print(f'Downloading: {save_file.name}')
print(f'to {save_file.absolute()}')
for chunk in r.iter_content(chunk_size=1024):
if chunk:
fd.write(chunk)
tmp += 1024
                    if printprogress:
self._print_progress(tmp, size)
print('')
print('Done')
def input_loop():
while True:
inp = input('Download path:\n')
if _test_write(inp): return inp
#try:
# d_path = Path(inp)
#except Exception as e:
# print('invalid path, try again\n')
# continue
#if d_path.exists(): return d_path
def name_loop():
while True:
inp = input('Name:\n')
return inp
def _test_write(path):
''' writes a file to the path and returns True if it succeded '''
writable = False
try:
p = Path(path)
test_file = p / 'testfile.testfile'
with open(test_file, 'wb') as f:
f.write(bytes(0))
writable = True
except Exception as e:
print('write test failed: ',e)
return
finally:
try:
os.remove(test_file)
except Exception as e:
#print('deleting test write failed: ',e)
pass
return writable
if __name__ == "__main__":
# d_path = input_loop() #let user decide where to download
d_path = Path('/home/bruno/Desktop')
name = name_loop() # let user decide what name it will have
d = Downloader()
test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260'
    d.download(test_image_url, d_path, name, printprogress=False)
Automate the Boring Stuff with Python/02.00 chuva.py | rdemarqui/Estudos | 2 | 12794077 |
# Rain script
print('Is it raining?')
verificaChuva = input()
if verificaChuva.capitalize() == 'Yes':
    print('Do you have an umbrella?')
    checkGuardaChuva = input()
    if checkGuardaChuva.capitalize() == 'No':
        while verificaChuva.capitalize() == 'Yes':
            print('Wait a while')
            print('Is it raining?')
            verificaChuva = input()
    print('Go outside')
| 3.78125 | 4 |
models/random_player.py | danilonumeroso/role | 1 | 12794078 | import random
import chess
import chess.engine
class RandomPlayer:
def __init__(self):
self.id = {
'name': 'RandomPlayer'
}
def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult:
legal_moves = list(board.legal_moves)
move = random.choice(legal_moves)
return chess.engine.PlayResult(move=move, ponder=None)
def quit(self):
pass
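# Illustrative usage sketch for RandomPlayer (python-chess API):
#   board = chess.Board()
#   result = RandomPlayer().play(board)
#   board.push(result.move)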
| 2.890625 | 3 |
python/python_a_byte_of/varargs.py | ssavinash1/Algorithm_stanford | 24 | 12794079 | def total(initial, *positionals, **keywords):
""" Simply sums up all the passed numbers. """
count = initial
for n in positionals:
count += n
for n in keywords:
count += keywords[n]
return count
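# Illustrative call of total (hypothetical keyword names):
#   total(10, 1, 2, 3, fruit=4, veg=5) -> 25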
print(__name__)
| 3.53125 | 4 |
cleanupNeeded/ASL-Localization-master/dwmDistances.py | AUVSL/Keiller-MS-Thesis | 0 | 12794080 | # dwmDistances
# Being written September 2019 by <NAME>
# Intended for use with DWM 1001 module through UART TLV interface
# This script calls the dwm_loc_get API call as specified in the
# DWM1001 Firmware API Guide 5.3.10.
# It parses the information received to send over
# the ROS network.
# In the future, this script will be expanded to allow
# position updates to be written to anchor nodes
# Currently limited to Python 3.6+. Use command line arguments
# to specify the name of the port (See myParser() function)
import serial # use "pip install pyserial" if you have not already done so
import time
import sys
import argparse
defaultPortName = '/dev/ttyACM0'
# On linux, you should use /dev/ttyACM0
# On Windows, the port name may be 'COM9' or similar
def myParser():
# This function handles command lets the user specify the
# name of the port to use with a command line argument.
# --port=[name or number]
parser = argparse.ArgumentParser(description = 'get position info') # Script descript.
parser.add_argument(
'--port',
default=defaultPortName,
help='specify the name of the port to use (default: ' + defaultPortName + ' )'
)
args = parser.parse_args()
print("Using port:", args.port)
return args.port
ser = None # This will be the name of the handle to the serial port
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
# API Error codes
ERR_CODE_OK = bytes.fromhex("00")
# 1: unknown command or broken TLV frame
# 2: internal error
# 3: invalid parameter
# 4: busy
# 5: operation not permitted
# API Commands [Type, length]
DWM_POS_SET = bytes.fromhex("01 0d") # Used to set position. Follow with position as 13 bytes
DWM_POS_GET = bytes.fromhex("02 00") # Used to ask for position.
DWM_LOC_GET = bytes.fromhex("0c 00") # Request for position + distances to anchors/tags
# Response codes
TLV_TYPE_DUMMY = bytes.fromhex("00") # Reserved for SPI dummy byte
TLV_TYPE_POS_XYZ = bytes.fromhex("41") # Response position coordinates x,y,z with q
TLV_TYPE_RNG_AN_DIST = bytes.fromhex("48") # Response: Ranging anchor distances
TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex("49") # Response: Ranging anchor distances and positions
def main():
global ser
print("dwmPosGet started.")
myPort = myParser()
# Establish serial port connection
try:
ser = serial.Serial(myPort, baudrate=115200, timeout=None)
print(ser)
print("Connection established.")
except:
print("Error in trying to connect to serial port {}".format(myPort))
stopLoop = False
# Loop plan:
# 1. Ask Decawave for position
# 2. Receive response, parsing as I go
# --First response is confirmation / error code
# --Second response is position
# 2.5 Error handling
# 3. Output message
# ----------
while stopLoop is False:
getLocations()
def sendTLV(request):
global ser
txBuffer = request
try:
ser.reset_input_buffer() # Get rid of anything in the buffer that could confuse this script.
ser.write(txBuffer)
except:
print(f"Error during transmission of request {txBuffer.hex()}")
return EXIT_FAILURE
return EXIT_SUCCESS
def receiveTLV():
# Listen for TLV response from Decawave DWM1001 module
# Returns a list of [Type, Length, Value]
# If it receives TLV_TYPE_DUMMY, it keeps listening for next message
global ser # The handle for the serial port connection
typeTLV = TLV_TYPE_DUMMY
while (typeTLV == TLV_TYPE_DUMMY):
typeTLV = ser.read(1) # Read the "type" byte of the response
lengthTLV = ser.read(1) # Read the "length" byte of the response
lengthTLV = int.from_bytes(lengthTLV, byteorder='little')
valueTLV = ser.read(lengthTLV) # Read the value [error code].
return [typeTLV, lengthTLV, valueTLV]
def parsePOSvalue(value):
# This helper function takes a 13-byte position code and returns the
# x, y, z, and q values
x = int.from_bytes(value[0:4], byteorder='little')
y = int.from_bytes(value[4:8], byteorder='little')
z = int.from_bytes(value[8:12], byteorder='little')
q = int.from_bytes(value[12:13], byteorder='little')
return [x, y, z, q]
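# Illustrative round trip for parsePOSvalue (hypothetical payload; per the
# DWM1001 API guide the coordinates are little-endian millimetres followed by
# one quality byte):
#   value = ((1000).to_bytes(4, 'little') + (2000).to_bytes(4, 'little')
#            + (500).to_bytes(4, 'little') + bytes([90]))
#   parsePOSvalue(value) -> [1000, 2000, 500, 90]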
def parseTLV(typeTLV, length, value):
# TLV_TYPE_DUMMY = bytes.fromhex("00") # Reserved for SPI dummy byte
# TLV_TYPE_POS_XYZ = bytes.fromhex("41") # Response position coordinates x,y,z with q
# TLV_TYPE_RNG_AN_DIST = bytes.fromhex("48") # Response: Ranging anchor distances
# TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex("49") # Response: Ranging anchor distances and positions
if typeTLV == TLV_TYPE_POS_XYZ:
[x, y, z, q] = parsePOSvalue(value)
return [x, y, z, q]
if typeTLV == TLV_TYPE_RNG_AN_DIST:
# This code may be received from an anchor node
        num_distances = int.from_bytes(value[0:1], byteorder='little')
distances = []
for i in range (num_distances):
offset = i*13+1
addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not 2 bytes
d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little')
dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little')
distances.append([addr, d, dq])
return [num_distances, distances]
if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:
num_distances = int.from_bytes(value[0:1], byteorder = 'little')
distances = []
for i in range(num_distances):
offset = i*13+1
addr = value[offset:offset+2].hex() # UWB address
d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance
dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality
[x,y,z,q] = parsePOSvalue(value[offset+7:offset+20])
distances.append([addr, d, dq, x, y, z, q])
return [num_distances, distances]
# Default case:
print("Error: attempted to parse TLV of type not yet supported.")
return EXIT_FAILURE
def printTLV(typeTLV, length, value):
if typeTLV == TLV_TYPE_POS_XYZ:
print( "{:_<15} {:_<15} {:_<15} {:_<5}".format('x','y','z','q'))
[x,y,z,q] = parseTLV(typeTLV, length, value)
print("{:<15} {:<15} {:<15} {:<5}".format(x,y,z,q))
if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:
print("{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q'))
[num_distances, distances] = parseTLV(typeTLV, length, value)
for i in range(num_distances):
[addr, d, dq, x, y, z, q] = distances[i]
print("{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}".format(addr, d, dq, x, y, z, q))
if typeTLV == TLV_TYPE_RNG_AN_DIST:
print("{:=<5} {:=<15} {:=<5}".format('addr','d','dq'))
[num_distances, distances] = parseTLV(typeTLV, length, value)
for i in range(num_distances):
[addr, d, dq] = distances[i]
print("{:<5} {:<15} {:<5}".format(addr, d, dq))
def getLocations():
# 1. Ask Decawave for Position and distances
temp = sendTLV(DWM_LOC_GET)
if temp == EXIT_FAILURE:
return EXIT_FAILURE
# -------------
# 2. Receive response. May get dummy bytes before real response.
[typeTLV, length, value]= receiveTLV()
if value != ERR_CODE_OK:
print("Received an error message. Flushing input buffer.")
print(value)
ser.reset_input_buffer()
return EXIT_FAILURE
# ---------Now, I read until I get the position
[typeTLV, length, value] = receiveTLV() # Expect Position
if length < 13:
print("No position received. Flushing buffer.")
ser.reset_input_buffer()
return EXIT_FAILURE
else:
printTLV(typeTLV, length, value)
[typeTLV, length, value] = receiveTLV() # Expect Distances
if length < 13:
print("No distances received")
else:
printTLV(typeTLV, length, value)
# The following lines allow this script to run as a program if called directly.
if __name__ == "__main__":
main()
| 2.6875 | 3 |
actions/train/hydras.py | drakonnan1st/JackBot | 0 | 12794081 |
"""Everything related to training hydralisks goes here"""
from sc2.constants import HYDRALISK
from actions.build.hive import BuildHive
class TrainHydralisk(BuildHive):
"""Ok for now"""
def __init__(self, main):
self.controller = main
BuildHive.__init__(self, self.controller)
async def should_handle(self):
"""Requirements to run handle, it limits the training a little so it keeps building ultralisks,
needs more limitations so the transition to hive is smoother"""
local_controller = self.controller
cavern = local_controller.caverns
if local_controller.hives and not cavern:
return False
if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):
return False
if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):
return False
if cavern.ready:
return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)
return not local_controller.floating_buildings_bm
async def handle(self):
"""Execute the action of training hydras"""
local_controller = self.controller
local_controller.add_action(local_controller.larvae.random.train(HYDRALISK))
return True
| 2.359375 | 2 |
Projects/Project_DMS/user/user.py | ivenpoker/Python-Projects | 1 | 12794082 |
import os
# Initialize UI settings based on terminal size (width)
UI_fill_width = os.get_terminal_size()[0]
# UI_fill_width = 74
UI_user_menu_fill_char = '#'
# Initialize UI settings (for prompts) based on terminal size (width)
UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5
# UI_input_fill_width = 54
UI_input_fill_char = ' '
class User:
def __init__(self, username: str, password: str):
self._username = username
self._password = password
def get_username(self):
return self._username
def print_user_menu(user: User) -> None:
print()
print(f" [ USER: {user.get_username()} ] ".center(UI_fill_width, UI_user_menu_fill_char))
def init_user_program(username: str, password: str) -> None:
__logged_user = User(username=username, password=password)
os.system("clear")
print_user_menu(user=__logged_user)
input("System ready ...".center(UI_fill_width, " "))
| 2.78125 | 3 |
remove_bg/apps.py | iamr0b0tx/image-background-remove-tool | 1 | 12794083 |
from django.apps import AppConfig
class RemoveBgConfig(AppConfig):
name = 'remove_bg'
| 1.203125 | 1 |
sessionAPI/sessionApp/forms.py | KiralyTamas/Django | 0 | 12794084 |
from django import forms
class ItemForm(forms.Form):
name=forms.CharField(max_length=40)
    quantity=forms.IntegerField()
| 1.984375 | 2
src/modu/editable/datatypes/fck.py | philchristensen/modu | 0 | 12794085 | # modu
# Copyright (c) 2006-2010 <NAME>
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Contains the FCK Editor support for modu.editable.
"""
import os, os.path, time, stat, shutil, array
from zope.interface import implements
from modu import editable, assets, web
from modu.persist import sql
from modu.editable import define
from modu.editable import resource as admin_resource
from modu.util import form, tags
from modu.web import resource, app
SUCCESS = 0
CUSTOM_ERROR = 1
UL_RENAME = 201
UL_INVALID_TYPE = 202
UL_ACCESS_DENIED = 203
FLD_EXISTS = 101
FLD_INVALID_NAME = 102
FLD_ACCESS_DENIED = 103
FLD_UNKNOWN_ERROR = 110
class FCKEditorField(define.definition):
"""
A field type that displays the FCK rich text editor.
"""
implements(editable.IDatatype)
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
frm = form.FormNode(self.name)
if(style == 'listing'):
frm(type='label', value='(html content)')
return frm
if(self.get('read_only', False)):
frm(type='label', value=getattr(storable, self.get_column_name(), ''))
return frm
fck_base_path = req.get_path('assets', 'fckeditor')
req.content.report('header', tags.script(type="text/javascript",
src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))[''])
fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js')
fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name)
# //$value = str_replace("'", ''', $value);
fck_value = getattr(storable, self.get_column_name(), '')
if(fck_value is None):
fck_value = ''
if(isinstance(fck_value, array.array)):
fck_value = fck_value.tostring()
else:
fck_value = str(fck_value)
fck_value = fck_value.replace("\r\n", r'\r\n')
fck_value = fck_value.replace("\n", r'\n')
fck_value = fck_value.replace("\r", r'\r')
fck_value = fck_value.replace('"', r'\"')
fck_var = 'fck_%s' % self.name
output = tags.script(type="text/javascript")[[
"var %s = new FCKeditor('%s');\n" % (fck_var, fck_element_name),
"%s.Config['CustomConfigurationsPath'] = \"%s\";\n" % (fck_var, fck_custom_config),
"%s.BasePath = \"%s/\";\n" % (fck_var, fck_base_path),
"%s.Value = \"%s\";\n" % (fck_var, fck_value),
"%s.Width = \"%s\";\n" % (fck_var, self.get('width', 600)),
"%s.Height = \"%s\";\n" % (fck_var, self.get('height', 400)),
"%s.ToolbarSet = \"%s\";\n" % (fck_var, self.get('toolbar_set', 'Standard')),
"%s.Create();\n" % fck_var
]]
frm(type="markup", value=output)
return frm
def get_search_value(self, value, req, frm):
"""
@see: L{modu.editable.define.definition.get_search_value()}
"""
value = value.value
if(value is ''):
return None
if(self.get('fulltext_search')):
return sql.RAW(sql.interp("MATCH(%%s) AGAINST (%s)", [value]))
else:
return sql.RAW(sql.interp("INSTR(%%s, %s)", [value]))
class FCKFileField(define.definition):
"""
Select a file from a given directory and save its path to the Storable.
"""
implements(editable.IDatatype)
def get_element(self, req, style, storable):
default_value = getattr(storable, self.get_column_name(), '')
frm = form.FormNode(self.name)
if(style == 'listing' or self.get('read_only', False)):
if not(default_value):
default_value = '(none)'
frm(type='label', value=default_value)
return frm
filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html')
assets.activate_jquery(req)
suffix = tags.input(type="button", value="Select...", id='%s-select-button' % self.name, onclick="getFile('%s')" % self.name)
req.content.report('header', tags.script(type="text/javascript")[[
"function getFile(elementName){\n",
" window.SetUrl = function(value){\n",
" var e = $('#' + elementName + '-value-field');\n",
" e.val(value);\n",
" e = $('#' + elementName + '-value-label');\n",
" e.val(value);\n",
" };\n",
" var filemanager = '%s';\n" % filemgr_path,
" var connector = '%s';\n" % self.get('fck_root', '/fck'),
" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\n",
" win.focus();\n",
"}\n",
]])
frm['label'](type="textfield", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled="1"))
frm['value'](type="hidden", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name))
return frm
def update_storable(self, req, frm, storable):
form_name = '%s-form' % storable.get_table()
if(form_name in req.data):
form_data = req.data[form_name]
if(self.name in form_data):
setattr(storable, self.get_column_name(), form_data[self.name]['value'].value)
return True
class FCKEditorResource(resource.CheetahTemplateResource):
"""
Provides server-side support for FCKEditor.
This resource implements the server-side portions of FCKEditor, namely
the image/file upload and server-side file browser.
@ivar selected_root: Details for the file upload directory.
@type selected_root: dict
@ivar content_type: The content type to be returned by this resource,
which changes depending on the particular paths accessed.
@type content_type: str
@ivar content: In most cases, the content to be returned, although it
will be None when using the template to generate the FCK config file.
@type content: str
"""
def __init__(self, **options):
self.allowed_roots = {
'__default__' : dict(
perms = 'access admin',
root_callback = lambda req: os.path.join(req.approot, req.app.webroot),
url_callback = lambda req, *path: req.get_path(*path),
),
}
for key, config in options.get('allowed_roots', {}).items():
if('perms' not in config):
config['perms'] = self.allowed_roots['__default__']['perms']
if('url_callback' not in config):
config['url_callback'] = self.allowed_roots['__default__']['url_callback']
self.allowed_roots[key] = config
def prepare_content(self, req):
"""
@see: L{modu.web.resource.IContent.prepare_content()}
"""
self.content_type = 'text/html'
self.content = None
self.template = None
if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'):
self.prepare_config_request(req)
return
if(req.postpath):
root_key = req.postpath[0]
else:
root_key = '__default__'
self.selected_root = self.allowed_roots[root_key]
if not(req.user.is_allowed(self.selected_root['perms'])):
app.raise403()
if(req.postpath and req.postpath[0] == 'upload'):
self.prepare_quick_upload(req)
else:
self.prepare_browser(req)
def get_content_type(self, req):
"""
@see: L{modu.web.resource.IContent.get_content_type()}
"""
return '%s; charset=UTF-8' % self.content_type
def get_content(self, req):
"""
@see: L{modu.web.resource.IContent.get_content()}
"""
if(self.content):
return self.content
return super(FCKEditorResource, self).get_content(req)
def get_template(self, req):
"""
@see: L{modu.web.resource.ITemplate.get_template()}
"""
return self.template
def get_template_root(self, req, template=None):
"""
@see: L{modu.web.resource.ITemplate.get_template()}
"""
if(template is None):
template = self.get_template(req)
if(template is None):
app.raise500("No template or content available.")
return admin_resource.select_template_root(req, template)
def get_selected_root(self, req):
if('root_callback' in self.selected_root):
return self.selected_root['root_callback'](req)
return self.selected_root['root']
def prepare_quick_upload(self, req):
"""
Provides support for the FCK quick upload feature.
@param req: The current request
@type req: L{modu.web.app.Request}
"""
result, filename = self.handle_upload(req, self.get_selected_root(req))
file_url = self.selected_root['url_callback'](req, filename)
self.content = [str(tags.script(type="text/javascript")[
"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\n" % (result, file_url, filename)
])]
def prepare_browser(self, req):
"""
Provides support for the FCK server-side file browser.
@param req: The current request
@type req: L{modu.web.app.Request}
"""
data = req.data
if(req['REQUEST_METHOD'] == 'POST'):
get_data = form.parse_query_string(req)
else:
get_data = data
command_name = get_data.get('Command').value
resource_type = get_data.get('Type').value
new_folder_name = get_data.get('NewFolderName').value
folder_path = get_data.get('CurrentFolder').value
if(folder_path is None):
folder_path = ''
elif(folder_path.startswith('/')):
folder_path = folder_path[1:]
folder_url = self.selected_root['url_callback'](req, folder_path)
content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url)
if(command_name == 'GetFolders'):
content += self.get_directory_items(req, folder_path, True)
elif(command_name == 'GetFoldersAndFiles'):
content += self.get_directory_items(req, folder_path, False)
elif(command_name == 'CreateFolder'):
content += self.create_folder(req, folder_path, new_folder_name)
elif(command_name == 'FileUpload'):
self.file_upload(req, folder_path)
return
else:
return
output = '<?xml version="1.0" encoding="utf-8" ?>'
output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)]
self.content_type = 'text/xml'
self.content = [output]
def prepare_config_request(self, req):
"""
Uses a Cheetah template to serve up the per-site FCK configuration file.
@param req: The current request
@type req: L{modu.web.app.Request}
"""
self.content_type = 'text/javascript'
self.template = 'fckconfig-custom.js.tmpl'
def get_directory_items(self, req, folder_path, folders_only):
"""
Used by browser code to support directory listing.
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']}
@type folder_path: str
@param folders_only: If True, only list folders
@type req: bool
"""
items = []
directory_path = os.path.join(self.get_selected_root(req), folder_path)
for item in os.listdir(directory_path):
if(item.startswith('.')):
continue
full_path = os.path.join(directory_path, item)
finfo = os.stat(full_path)
if(stat.S_ISREG(finfo.st_mode)):
items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024)))
else:
items.append(tags.Tag('Folder')(name=item))
items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower()))
content = tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])]
if(not folders_only):
file_string = ''.join([str(t) for t in items if t.tag == 'File'])
if(file_string):
content += tags.Tag('Files')[file_string]
return content
def create_folder(self, req, folder_path, new_folder_name):
"""
Used by browser code to support new folder creation.
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']}
@type folder_path: str
@param new_folder_name: The name of the folder to create
@type new_folder_name: str
"""
directory_path = os.path.join(self.get_selected_root(req), folder_path)
#prevent shenanigans
new_folder_name = new_folder_name.split('/').pop()
new_path = os.path.join(directory_path, new_folder_name)
if(os.access(new_path, os.F_OK)):
content = tags.Tag('Error')(number=FLD_EXISTS)
else:
try:
os.mkdir(new_path)
content = tags.Tag('Error')(number=SUCCESS)
except:
content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)
return content
def file_upload(self, req, folder_path):
"""
Provides support for file uploads within the server-side browser window.
@param req: The current request
@type req: L{modu.web.app.Request}
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']}
@type folder_path: str
"""
result, filename = self.handle_upload(req, folder_path)
file_url = self.selected_root['url_callback'](req, folder_path, filename)
self.content_type = 'text/html'
self.content = [str(tags.script(type="text/javascript")[
"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\n" % (result, filename)
])]
def handle_upload(self, req, folder_path):
"""
Pulls upload data out of the request and saves to the given folder.
@param req: The current request
@type req: L{modu.web.app.Request}
@param folder_path: The folder to save to, relative to C{self.selected_root['root_callback']}
@type folder_path: str
"""
result = UL_ACCESS_DENIED
data = req.data
fileitem = data['NewFile']
filename = fileitem.filename
destination_path = os.path.join(self.get_selected_root(req), folder_path, filename)
        if(os.access(destination_path, os.F_OK)):
            parts = filename.split('.')
            if(len(parts) > 1):
                parts[len(parts) - 2] += '-%d' % int(time.time())
                filename = '.'.join(parts)
                # point the write at the renamed file, not the existing one
                destination_path = os.path.join(self.get_selected_root(req), folder_path, filename)
                result = UL_RENAME
            else:
                result = UL_INVALID_TYPE
if(result != UL_INVALID_TYPE):
try:
                uploaded_file = open(destination_path, 'wb')
bytes = fileitem.file.read(65536)
while(bytes):
uploaded_file.write(bytes)
bytes = fileitem.file.read(65536)
uploaded_file.close()
result = SUCCESS
except:
import traceback
                traceback.print_exc()
result = UL_ACCESS_DENIED
return result, filename
| 1.929688 | 2 |
bldr/cmd/new.py | bldr-cmd/bldr-cmd | 0 | 12794086 |
"""
`new` Command
"""
from bldr.environment import Environment
import os
import sys
from git.objects.submodule.root import RootUpdateProgress
import bldr
import bldr.gen.render
import bldr.util
import giturlparse
from git import Repo
from pathlib import Path
import click
import json
from bldr.cli import pass_environment, run_cmd
from bldr.gen.render import render
import bldr.dep.env
dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), "dotbldr")
@click.command("new", short_help="Add new dependency via url")
@click.option("-g", "--git", flag_value=True)
@click.option("-b", "--branch", required=False, type=str)
@click.option("-k", "--brick", flag_value=True, help="Add the dependency to the bldr modules folder")
@click.argument("url", required=False, type=str)
@pass_environment
def cli(ctx, url, git, branch, brick):
run_cmd(ctx, 'init')
run_cmd(ctx, 'deps.add', branch=branch, brick=brick, force=True, path=".", url=url)
run_cmd(ctx, 'gen.up')
| 1.976563 | 2 |
freefield/analysis.py | pfriedrich-hub/freefield | 1 | 12794087 |
import numpy as np
from scipy import stats
def double_to_single_pole(azimuth_double, elevation_double):
azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double)
azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double))
return np.rad2deg(azimuth_single)
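# Sanity check for double_to_single_pole (illustrative): at zero elevation both
# conventions agree, e.g. double_to_single_pole(45, 0) -> 45.0; at 60 deg
# elevation the same azimuth maps to arctan(tan(45 deg) / cos(60 deg)) ~ 63.43 deg.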
def single_pole_to_polar(azimuth, elevation):
phi = -1 * azimuth
theta = elevation - 90
return phi, theta
def polar_to_single_pole(phi, theta):
azimuth = phi * -1
elevation = theta + 90
return azimuth, elevation
def polar_to_cartesian(phi, theta):
phi, theta = np.deg2rad(phi), np.deg2rad(theta)
x = np.sin(theta)*np.cos(phi)
y = np.sin(theta)*np.sin(phi)
z = np.cos(theta)
return x, y, z
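# Illustrative point for polar_to_cartesian: polar_to_cartesian(0, 90)
# -> (~1.0, 0.0, ~0.0), i.e. the unit vector along x for phi = 0 on the
# horizontal plane theta = 90.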
def mean_dir(data, speaker):
# use vector addition with uncorrected angles:
# sines, cosines = _sines_cosines(data, speaker)
# return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten()
# use regular addition with corrected angles:
idx = np.where(data[:, 1] == speaker)
return data[idx, 2:4].mean(axis=1)
def mad(data, speaker, ref_dir=None):
'Mean absolute difference between reference directions and pointed directions'
if ref_dir is None:
ref_dir = mean_dir(data, speaker)
idx = np.where(data[:,1] == speaker)
diffs = data[idx,2:4] - ref_dir
return np.sqrt((diffs**2).sum(axis=2)).mean()
def rmse(data, speaker, ref_dir=None):
    'Vertical and horizontal localization accuracies were quantified by computing the root mean square of the discrepancies between perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).'
if ref_dir is None:
ref_dir = mean_dir(data, speaker)
idx = np.where(data[:,1] == speaker)
diffs = data[idx,2:4] - ref_dir
dist = np.sqrt((diffs**2).sum(axis=2))
return np.sqrt((dist**2).mean())
def eg(data, speaker_positions=None):
'''
Vertical localization performance was also quantified by the EG, defined as the slope of the linear regression of perceived versus physical elevations (Hofman et al., 1998). Perfect localization corresponds to an EG of 1, while random elevation responses result in an EG of 0.'''
eles = data[:,3]
if speaker_positions is None:
return np.percentile(eles, 75) - np.percentile(eles, 25)
speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers
elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the speakers in the presented sequence
regression = stats.linregress(eles, elevation_seq)
return regression.slope
| 2.359375 | 2 |
zeppelin_handy_helpers/__init__.py | sbilello/zeppelin-handy-helper | 0 | 12794088 |
from argumentsactions import Read, Check, Stop, Monitor
| 1.203125 | 1 |
mojo/devtools/common/devtoolslib/apptest.py | zbowling/mojo | 1 | 12794089 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Apptest is a Mojo application that interacts with another Mojo application
and verifies assumptions about behavior of the app being tested.
"""
import logging
import time
_logger = logging.getLogger()
def _build_shell_arguments(shell_args, apptest_url, apptest_args):
"""Builds the list of arguments for the shell.
Args:
shell_args: List of arguments for the shell run.
apptest_url: Url of the apptest app to run.
apptest_args: Parameters to be passed to the apptest app.
Returns:
Single list of shell arguments.
"""
result = list(shell_args)
if apptest_args:
result.append("--args-for=%s %s" % (apptest_url, " ".join(apptest_args)))
result.append(apptest_url)
return result
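# Illustrative expansion of _build_shell_arguments (hypothetical urls and flags):
#   _build_shell_arguments(['--enable-multiprocess'],
#                          'mojo:foo_apptests',
#                          ['--gtest_filter=FooTest.*'])
#   -> ['--enable-multiprocess',
#       '--args-for=mojo:foo_apptests --gtest_filter=FooTest.*',
#       'mojo:foo_apptests']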
def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout,
output_test):
"""Runs shell with the given arguments, retrieves the output and applies
|output_test| to determine if the run was successful.
Args:
shell: Wrapper around concrete Mojo shell, implementing devtools Shell
interface.
shell_args: List of arguments for the shell run.
apptest_url: Url of the apptest app to run.
apptest_args: Parameters to be passed to the apptest app.
output_test: Function accepting the shell output and returning True iff
the output indicates a successful run.
Returns:
True iff the test succeeded, False otherwise.
"""
arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args)
command_line = "mojo_shell " + " ".join(["%r" % x for x in arguments])
_logger.debug("Starting: " + command_line)
start_time = time.time()
(exit_code, output, did_time_out) = shell.run_and_get_output(arguments,
timeout)
run_time = time.time() - start_time
_logger.debug("Completed: " + command_line)
# Only log if it took more than 3 second.
if run_time >= 3:
_logger.info("Test took %.3f seconds: %s" % (run_time, command_line))
if exit_code or did_time_out or not output_test(output):
print 'Failed test: %r' % command_line
if exit_code:
print ' due to shell exit code %d' % exit_code
elif did_time_out:
print ' due to exceeded timeout of %fs' % timeout
else:
print ' due to test results'
print 72 * '-'
print output
print 72 * '-'
return False
return True
| 2.15625 | 2 |
market_maker/abstract.py | pborky/sample-market-maker | 1 | 12794090 |
import logging
from abc import ABC
class LoggingBase(ABC):
def __init__(self, log_level: int) -> None:
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(log_level)
@property
def log_level(self) -> int:
        return self.logger.level
| 3.140625 | 3
usertracking/middleware.py | joebos/django-user-tracking | 0 | 12794091 | from tracking import generate_new_tracking_key, register_event
from django.core.urlresolvers import reverse
from django.conf import settings
USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, "USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE", False)
class UserTrackingMiddleware(object):
def process_request(self, request):
#request.session["test"] = "good"
return None
def process_response(self, request, response):
"""
Only record when we return HTML pages. Set a cookie if not set
"""
if 'text/html' in response.get('Content-Type', ''):
content = getattr(response, 'content', '')
if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find("<body") >= 0:
url_request = request.path
urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')]
found = False
for url in urls:
if url_request.find(url) >= 0:
found = True
break
if not found:
tracking_id = None
event_data = {'url': request.path_info, 'method': request.method}
if 'user_tracking_id' not in request.COOKIES:
tracking_id = generate_new_tracking_key()
response.set_cookie('user_tracking_id', tracking_id)
register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request)
#set javascript callback behavior to check if the user has disabled cookies
response.set_cookie('user_tracking_verify', tracking_id)
else:
tracking_id = request.COOKIES['user_tracking_id']
register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data, request=request)
return response
| 2.078125 | 2 |
workspaces.py | tylermenezes/PyWorkspaces | 1 | 12794092 | #!/usr/bin/python
import re
import subprocess
class workspaces():
@staticmethod
def _cmd(*args):
return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
@staticmethod
def get_display_size():
size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace("\n", "")))[8].split('x')
return {"x": int(size[0]), "y": int(size[1])}
@staticmethod
def get_workspace_count():
total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace("\n", "")))[3].split('x')
total_size = [int(x) for x in total_size]
display = workspaces.get_display_size()
return {"x": int(total_size[0]/display['x']), "y": int(total_size[1]/display['y'])}
@staticmethod
def _workspace_coords_to_screen_coords(x, y):
disp_size = workspaces.get_display_size()
workspace_size = workspaces.get_workspace_count()
x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 - x)
        y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1 - y)
return {"x": x_coord, "y": y_coord}
@staticmethod
def move_window(id, desk_x, desk_y):
coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y)
subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1'])
@staticmethod
def get_windows():
windows = workspaces._cmd('wmctrl', '-l').split("\n")
lines = [re.split(' *', desc, 3) for desc in windows]
        return [dict(zip(['id', 'desktop', 'machine', 'title'], line)) for line in lines]
| 2.78125 | 3
utility/args.py | Tabbomat/MADUtilities | 2 | 12794093 | import configargparse
def parse_args() -> dict:
parser = configargparse.ArgParser(default_config_files=['config.ini'])
parser.add_argument('--madmin_url', required=True, type=str)
parser.add_argument('--madmin_user', required=False, default='', type=str)
parser.add_argument('--madmin_password', required=False, default='', type=str)
args, unknown = parser.parse_known_args()
return {'madmin_url': args.madmin_url.rstrip('/'),
'madmin_user': args.madmin_user.strip(),
'madmin_password': args.madmin_password.strip()}
| 2.890625 | 3 |
tests/compose/test_DAG.py | Dynatrace/alyeska | 2 | 12794094 | # -*- coding: utf-8 -*-
## ---------------------------------------------------------------------------
## Copyright 2019 Dynatrace LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## ---------------------------------------------------------------------------
"""Unit tests for the DAG class."""
from collections import defaultdict
import os
import pathlib
import pytest
from alyeska.compose import Task, DAG
from alyeska.compose.exceptions import CyclicGraphError
from test_compose_globals import (
COMPOSE_SMALL,
COMPOSE_BIG,
COMPOSE_CYCLE,
COMPOSE_TRICKY,
)
# ----------------------------------------------------------------------------
# Helper Functions
# ----------------------------------------------------------------------------
def get_two_tasks():
return (Task("A.py", env="test-env"), Task("B.py", env="test-env"))
# ----------------------------------------------------------------------------
# DAG magic methods
# ----------------------------------------------------------------------------
def test__validate_dependency():
make_tea = Task("make_tea.py", "test-env")
drink_tea = Task("drink_tea.py", "test-env")
with pytest.raises(TypeError):
DAG.validate_dependency([1, 2, 3])
with pytest.raises(ValueError):
DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]}))
with pytest.raises(ValueError):
DAG.validate_dependency({Task: {1, 2, 3}})
DAG.validate_dependency({make_tea: drink_tea})
DAG.validate_dependency({make_tea: {drink_tea, drink_tea}})
def test__DAG_init():
DAG()
# init with dependencies
make_tea = Task("make_tea.py", "test-env")
drink_tea = Task("drink_tea.py", "test-env")
dag = DAG(tasks=make_tea)
assert len(dag.tasks) == 1
dag = DAG(tasks={drink_tea, make_tea})
assert len(dag.tasks) == 2
dag = DAG(upstream_dependencies={drink_tea: make_tea})
assert len(dag.tasks) == 2
dag = DAG(downstream_dependencies={make_tea: drink_tea})
assert len(dag.tasks) == 2
def test__DAG_repr():
p = pathlib.Path("make_tea.py")
make_tea = Task(p, "test-env")
dag = DAG()
dag.add_task(make_tea)
assert repr(dag) == "".join(["DAG({Task(", p.resolve().as_posix(), ")})"])
# ----------------------------------------------------------------------------
# DAG.tasks
# ----------------------------------------------------------------------------
def test__DAG_add_task():
A, B = get_two_tasks()
dag = DAG()
dag.add_task(A)
assert dag.tasks == {A}, "Test Task was not added to the DAG"
def test__DAG_add_tasks():
A, B = get_two_tasks()
C = Task("C.py")
dag = DAG()
dag.add_tasks({A, B})
assert dag.tasks == {A, B}, "Test Tasks were not added to the DAG"
dag.add_tasks(C)
assert dag.tasks == {A, B, C}
def test__DAG_remove_task():
A, B = get_two_tasks()
dag = DAG()
dag.add_tasks({A, B})
dag.remove_task(A)
assert dag.tasks == {B}
def test__DAG_remove_tasks():
A, B = get_two_tasks()
C = Task("C.py")
dag = DAG()
dag.add_tasks({A, B, C})
dag.remove_tasks({A, B})
assert dag.tasks == {C}
dag.remove_tasks(C)
assert dag.tasks == set()
# ----------------------------------------------------------------------------
# add dependencies
# ----------------------------------------------------------------------------
def test__DAG_add_dependency():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, A)
assert dag._edges[A] == set([B])
def test__DAG_add_dependency_detect_cycle():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, A)
with pytest.raises(CyclicGraphError):
dag.add_dependency(A, B)
def test__DAG_add_dependencies():
A, B = get_two_tasks()
C = Task("C.py", env="test-env")
dag = DAG()
dag.add_dependencies({B: A})
assert dag._edges[A] == set([B])
dag = DAG()
dag.add_dependencies({C: {A, B}})
assert dag._edges[A] == set([C])
assert dag._edges[B] == set([C])
def test__DAG_add_dependency_detect_cycle2():
A, B = get_two_tasks()
C = Task("C.py", env="test-env")
dag = DAG()
with pytest.raises(CyclicGraphError):
dag.add_dependencies({A: C, B: A, C: B})
# ----------------------------------------------------------------------------
# methods
# ----------------------------------------------------------------------------
def test__DAG_get_downstream():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_downstream() is not None
assert dag.get_downstream()[A] == {B}
assert dag.get_downstream() == {A: {B}}, "Task B is not downstream"
def test__DAG_get_upstream():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_upstream() is not None
assert dag.get_upstream()[B] == {A}
assert dag.get_upstream() == {B: {A}}, "Task A is not upstream"
def test__DAG_get_sources():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_sources() is not None
assert dag.get_sources() == {A}
def test__DAG_get_sinks():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
assert dag.get_sinks() is not None
assert dag.get_sinks() == {B}
def test__DAG_is_cyclic():
A, B = get_two_tasks()
dag = DAG()
dag.add_dependency(B, depends_on=A)
    assert not dag.is_cyclic(), "acyclic graph identified as cyclic"
with pytest.raises(CyclicGraphError):
dag.add_dependency(A, depends_on=B)
def test__DAG_from_yaml():
DAG.from_yaml(COMPOSE_SMALL)
with pytest.raises(CyclicGraphError):
DAG.from_yaml(COMPOSE_CYCLE)
dag = DAG.from_yaml(COMPOSE_TRICKY)
assert len(dag.tasks) > 0
| 1.515625 | 2 |
tools/check_input.py | yuk-to/SALMON2 | 3 | 12794095 | #!/usr/bin/env python
#
# Copyright 2020 ARTED developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import re
import sys
def list_div(l1,l2):
n = min(len(l1),len(l2))
p = []
for i in range(0,n):
p.append(l1[i] / l2[i])
return p
def list_prod(nlist):
n = 1
for i in nlist:
n = n * i
return n
def fail_exit():
print 'ERROR.'
sys.exit(1)
# SALMON input list
def to_boolean(s):
if s.lower() == '\'y\'':
return True
elif s.lower() == '\'n\'':
return False
else:
return False
def gen_inputlist_map(filename):
inputmap = {}
inputmap['nproc_k'] = 1
inputmap['nproc_ob'] = 1
inputmap['nproc_rgrid'] = [1,1,1]
inputmap['yn_ffte'] = False
inputmap['yn_scalapack'] = False
inputmap['process_allocation'] = 'grid_sequential'
with open(filename) as f:
number_pattern = re.compile(r'\d+')
float_pattern = re.compile(r'[+-]?(?:\d+\.?\d*|\.\d+)(?:[dD][+-]?\d+)?')
yn_pattern = re.compile(r'\'[yYnN]\'')
string_pattern = re.compile(r'\'\w*\'')
theory_pattern = re.compile(r'\s*theory\s*=\s*')
nproc_k_pattern = re.compile(r'\s*nproc_k\s*=\s*\d+')
nproc_ob_pattern = re.compile(r'\s*nproc_ob\s*=\s*\d+')
nproc_rgrid_pattern = re.compile(r'\s*nproc_rgrid\s*=\s*\d+')
nstate_pattern = re.compile(r'\s*nstate\s*=\s*\d+')
al_pattern = re.compile(r'\s*al\s*=\s*\d+')
dl_pattern = re.compile(r'\s*dl\s*=\s*\d+')
num_rgrid_pattern = re.compile(r'\s*num_rgrid\s*=\s*\d+')
yn_ffte_pattern = re.compile(r'\s*yn_ffte\s*=\s*')
yn_scalapack_pattern= re.compile(r'\s*yn_scalapack\s*=\s*')
proc_alloc_pattern = re.compile(r'\s*process_allocation\s*=\s*')
for s in f:
if theory_pattern.match(s):
inputmap['theory'] = string_pattern.search(s).group().strip('\'').lower()
elif nstate_pattern.match(s):
inputmap['nstate'] = int(number_pattern.search(s).group())
elif nproc_k_pattern.match(s):
inputmap['nproc_k'] = int(number_pattern.search(s).group())
elif nproc_ob_pattern.match(s):
inputmap['nproc_ob'] = int(number_pattern.search(s).group())
elif nproc_rgrid_pattern.match(s):
dims = []
for x in number_pattern.findall(s):
dims.append(int(x))
inputmap['nproc_rgrid'] = dims
elif al_pattern.match(s):
dims = []
for x in float_pattern.findall(s):
dims.append(float(x.replace('d','e')))
inputmap['al'] = dims
elif dl_pattern.match(s):
dims = []
for x in float_pattern.findall(s):
dims.append(float(x.replace('d','e')))
inputmap['dl'] = dims
elif num_rgrid_pattern.match(s):
dims = []
for x in number_pattern.findall(s):
dims.append(int(x))
inputmap['num_rgrid'] = dims
elif yn_ffte_pattern.match(s):
inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group())
elif yn_scalapack_pattern.match(s):
inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group())
elif proc_alloc_pattern.match(s):
inputmap['process_allocation'] = string_pattern.search(s).group().strip('\'').lower()
return inputmap
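# Illustrative SALMON input fragment gen_inputlist_map accepts (hypothetical
# values):
#   &parallel
#     nproc_ob = 4
#     nproc_rgrid = 2,2,2
#   /
#   &rgrid
#     num_rgrid = 16,16,16
#   /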
def print_inputlists(inputmap):
    print '&parallel'
print ' nproc_k = {}'.format(inputmap['nproc_k'])
print ' nproc_ob = {}'.format(inputmap['nproc_ob'])
print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid'])
print ' process_allocation = \'{}\''.format(inputmap['process_allocation'])
print '/'
print '&system'
print ' nstate = {}'.format(inputmap['nstate'])
print '/'
print '&rgrid'
print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid'])
print '/'
# FFTE
def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid):
y1 = num_rgrid[0] % nprocs_rgrid[1]
y2 = num_rgrid[1] % nprocs_rgrid[1]
z1 = num_rgrid[1] % nprocs_rgrid[2]
z2 = num_rgrid[2] % nprocs_rgrid[2]
return y1 == 0 and y2 == 0 and z1 == 0 and z2 == 0
def check_ffte_condition_prime_factors(num_rgrid):
for i in range(0,3):
t = num_rgrid[i]
for j in range(0,26):
if t % 2 == 0:
t = t / 2
for j in range(0,17):
if t % 3 == 0:
t = t / 3
for j in range(0,11):
if t % 5 == 0:
t = t / 5
if t != 1:
return False
return True
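# Illustrative checks for check_ffte_condition_prime_factors: a 240-point axis
# passes (240 = 2^4 * 3 * 5), while a 112-point axis fails because of the
# leftover prime factor 7 (112 = 2^4 * 7).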
# ScaLAPACK
def get_sl_process_grid_size(n, nprocs):
npcol = int(math.sqrt(float(nprocs)))
npcol = npcol + (npcol % 2)
for ii in range(0,100):
nprow = nprocs / npcol
if (nprow*npcol == nprocs):
break
npcol = npcol + 2
return nprow,npcol
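# get_sl_blocking_factor below uses the (n + d - 1) / d integer trick, i.e.
# ceil(n/nprow) and ceil(n/npcol), and takes the smaller value so the
# ScaLAPACK block is square.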
def get_sl_blocking_factor(n, nprow, npcol):
k1 = (n+nprow-1)/nprow
k2 = (n+npcol-1)/npcol
mb = min(k1,k2)
nb = mb
return mb,nb
def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte):
if num_rgrid[0] % nprocs_per_node != 0:
        print '[INFO] num_rgrid[0] is not divisible by nprocs_per_node.'
nzy = nprocs / nprocs_per_node
nz = int(math.sqrt(float(nzy)))
nz = nz + (nz % 2)
for ii in range(0,100):
ny = nzy / nz
if (ny*nz == nzy):
break
nz = nz + 2
if is_ffte:
ny = ny + (ny % 2)
for ii in range(ny,1,-1):
if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]):
if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0:
ny = ii
break
return [nprocs_per_node, ny, nz]
if __name__ == '__main__':
if len(sys.argv) < 4:
print '[Usage] ./{} <SALMON inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0])
fail_exit()
inputmap = gen_inputlist_map(sys.argv[1])
if inputmap['theory'] != 'dft':
        print 'Theory {} is not supported yet'.format(inputmap['theory'])
# logical checking...
if not 'num_rgrid' in inputmap.keys():
dims = list_div(inputmap['al'], inputmap['dl'])
inputmap['num_rgrid'] = [int(f) for f in dims]
print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid'])
nnodes = int(sys.argv[2])
nprocs_per_node = int(sys.argv[3])
nprocs_global = nnodes * nprocs_per_node
if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global:
print '[INFO]'
print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI procs = {}'.format(nprocs_global)
print 'calculate nproc_k,ob and rgrid'
# find nproc_.*
num_rgrid = inputmap['num_rgrid']
nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte'])
nproc_ob = nprocs_global / list_prod(nproc_rgrid)
nproc_k = 1
inputmap['nproc_k'] = nproc_k
inputmap['nproc_ob'] = nproc_ob
inputmap['nproc_rgrid'] = nproc_rgrid
if nproc_ob >= nprocs_per_node:
inputmap['process_allocation'] = 'orbital_sequential'
# FFTE checking...
if inputmap['yn_ffte']:
if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']):
print '[FFTE]'
print 'num_rgrid and nproc_rgrid are available to use FFTE.'
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']):
print ' prime factors for number of grids must be combination of 2,3, or 5'
else:
print '[FFTE]'
print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.'
print 'please check for condition:'
print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0'
print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0'
print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0'
print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0'
fail_exit()
# ScaLAPACK checking...
if inputmap['yn_scalapack']:
n = inputmap['nstate']
nprocs = inputmap['nproc_ob']
nprow,npcol = get_sl_process_grid_size(n, nprocs)
if (nprow*npcol != nprocs):
print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs)
print ' please check the nproc_ob'
fail_exit()
mb,nb = get_sl_blocking_factor(n, nprow, npcol)
if (nb*npcol != n):
n = max(nb*npcol, n)
n = min(n, (nb+1)*npcol)
mb,nb = get_sl_blocking_factor(n, nprow, npcol)
if (n != inputmap['nstate']):
print '[ScaLAPACK]'
print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n)
inputmap['nstate'] = n
print '[ScaLAPACK]'
print 'process grid map (row,col) = ({},{})'.format(nprow,npcol)
print 'blocking factor (row,col) = ({},{})'.format(mb,nb)
print '[INFO]'
print 'wave function size per MPI process'
for i in range(0,3):
print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i])
print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob'])
sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node
sy = list_prod(inputmap['nproc_rgrid'])
print ''
print '# =============================================== #'
print 'Probably suitable parameters for large scale system.'
print 'please replace the following inputlists.'
print ''
print_inputlists(inputmap)
print ''
print '# =============================================== #'
print 'If you use mesh-torus network type system,'
print 'probably, the following node shape is suitable to run the application.'
print ''
print 'NXxNY = {}x{}'.format(sx,sy)
print 'NX = (nproc_ob * nproc_k) / # of process per node'
print 'NY = product(nproc_rgrid)'
| 2.453125 | 2 |
magic_deco_2.py | bmintz/python-snippets | 2 | 12794096 | #!/usr/bin/env python3
# encoding: utf-8
def inject_x(f):
x = 'hi'
def g(*args, **kwargs):
nonlocal x
return f(*args, **kwargs)
return g
@inject_x
def foo():
return x
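# Note: foo() raises NameError because its body resolves x with normal scoping
# rules; the decorator's closure variable x belongs to g, not to the wrapped f.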
def inject_x_attempt_2(f):
def wrapper(f):
x = 'hi'
def wrapped(*args, **kwargs):
nonlocal x
return f(*args, **kwargs)
return wrapped
return wrapper(f)
@inject_x_attempt_2
def bar():
return x
if __name__ == '__main__':
try:
print(foo())
except NameError:
print('attempt 1 failed')
try:
print(bar())
except NameError:
print('attempt 2 failed')
try:
print(baz())
except NameError:
print('attempt 3 failed')
    # x never escapes the decorator closures, so guard the final lookup too
    try:
        print(x)
    except NameError:
        print('x is not defined at module scope')
| 3.34375 | 3 |
scripts/examples/ClimateEmulator/gev_fit_kma_fretchet.py | teslakit/teslak | 12 | 12794097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# basic import
import os
import os.path as op
import sys
import time
sys.path.insert(0, op.join(op.dirname(__file__),'..','..'))
# python libs
import numpy as np
import xarray as xr
# custom libs
from teslakit.project_site import PathControl
from teslakit.extremes import FitGEV_KMA_Frechet
# --------------------------------------
# Test data storage
pc = PathControl()
p_tests = pc.p_test_data
p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')
# input
p_npz = op.join(p_test, 'swell_1_Hs.npz')
# --------------------------------------
# Load data
npzf = np.load(p_npz)
bmus = npzf['arr_0']
n_clusters = npzf['arr_1']
var_wvs = npzf['arr_2']
print(bmus)
print(n_clusters)
print(var_wvs)
print()
# TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92
gp_pars = FitGEV_KMA_Frechet(
bmus, n_clusters, var_wvs)
print(gp_pars)
| 2.125 | 2 |
src/preprocess_clean.py | ngov17/RecipeNet | 0 | 12794098 | <filename>src/preprocess_clean.py
import sys, os
import numpy as np
from PIL import Image
from skimage import io, transform, img_as_float32
import random
START_TOKEN = "*START*" # no index as start token is not added to vocabulary as we don't want to predict start
STOP_TOKEN = "*STOP*" #Index: 0
PAD_TOKEN = "*<PASSWORD>*" #Index: 1
UNK_TOKEN = "*UNK*" #Index: 2
WINDOW_SIZE = 20
def preprocess_image(image_path, is_train=True):
"""
1. read image
2. resize image to 256 * 256 * 3
3. randomly sample a 224 * 224 *3 patch of the image
4. normalize image intensity? (NOTE: not currently doing this, which is fine since original paper doesn't)
5. return processed image
"""
image = io.imread(image_path)
h,w,c = image.shape
newshape = (256,256,3)
if h > w:
newshape = (int((256.0 / float(w)) * float(h)), 256, c)
elif h < w:
newshape = (256, int((256.0 / float(h)) * float(w)), c)
image = transform.resize(image, newshape, anti_aliasing=True)
start_r = 0
start_c = 0
if is_train:
start_r = random.randint(0, newshape[0]-224)
start_c = random.randint(0, newshape[1]-224)
else:
start_r = (newshape[0] - 224) // 2
start_c = (newshape[1] - 224) // 2
image = image[start_r:start_r+224, start_c:start_c+224, :]
return image
def get_image_batch(image_paths, is_train=True):
"""
param image_paths: a list of paths to image locations (such as a length batch_size slice of a larger list)
param is_train: True if processing images for training (sample random image patch) or False if processing for testing (sample central image patch)
return: a numpy array of size (len(image_paths), 224, 224, 3) containing the preprocessed images
"""
return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0)
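# Usage sketch (paths are hypothetical):
#   batch = get_image_batch(["imgs/apple_pie_1.jpg", "imgs/ramen_2.jpg"], is_train=False)
#   batch.shape -> (2, 224, 224, 3)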
def pad_ingredients(ingredient_list):
"""
"""
padded_ingredients_list = []
for line in ingredient_list:
line = list(set(line))
padded_ing = line[:(WINDOW_SIZE - 2)]
padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * (
WINDOW_SIZE - len(padded_ing) - 1)
padded_ingredients_list.append(padded_ing)
return padded_ingredients_list
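# Note (observation, not from the original comments): each padded row ends up
# with WINDOW_SIZE + 1 entries because START_TOKEN is prepended on top of the
# window; presumably the START token is dropped or rows are split into shifted
# input/target pairs downstream.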
def convert_to_id(vocab, sentences):
"""
"""
return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word in sentence] for sentence in sentences])
def build_vocab(ingredients):
"""
"""
vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2}
tokens = []
for i in ingredients: tokens.extend(i)
all_words = sorted(list(set(tokens)))
for i, word in enumerate(all_words):
vocab[word] = i + 3
return vocab
def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path):
"""
"""
class_file = open(classes_path, "r")
classes = []
for line in class_file:
classes.append(line.rstrip().lower())
ingredients_file = open(ingredients_path, "r")
ingredients = []
for line in ingredients_file:
ingredients.append(line.rstrip().lower())
ingredients_dict = {}
for i in range(len(ingredients)):
ingredients_dict[classes[i]] = ingredients[i]
train_ingredient_list = []
test_ingredient_list = []
train_image_paths = []
test_image_paths = []
train_file = open(train_image_path, "r")
test_file = open(test_image_path, "r")
for line in train_file:
splitline = line.rstrip().split('/')
train_image_paths.append(os.path.join(images, splitline[0], splitline[1]))
for line in test_file:
splitline = line.rstrip().split('/')
test_image_paths.append(os.path.join(images, splitline[0], splitline[1]))
for r, d, f in os.walk(images):
for file in f:
name = file.split("_")[1:-1]
str = ""
for word in name:
str += word + " "
str = str[:-1]
if str in ingredients_dict:
pth = os.path.join(r, file)
if pth in train_image_paths:
train_ingredient_list.append(ingredients_dict[str].split(","))
elif pth in test_image_paths:
test_ingredient_list.append(ingredients_dict[str].split(","))
vocab = build_vocab(train_ingredient_list + test_ingredient_list)
padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list))
padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list))
train_ingredients = convert_to_id(vocab, padded_train_ingredients)
test_ingredients = convert_to_id(vocab, padded_test_ingredients)
return train_image_paths, train_ingredients, test_image_paths, test_ingredients, vocab | 2.8125 | 3 |
env/lib/python3.6/site-packages/dal_queryset_sequence/tests/test_views.py | anthowen/duplify | 1,368 | 12794099 | <gh_stars>1000+
import json
from dal import autocomplete
from django import test
from django.contrib.auth.models import Group
class Select2QuerySetSequenceViewTestCase(test.TestCase):
def setUp(self):
self.expected = {
'pagination': {
'more': False
},
'results': []
}
@classmethod
def setUpClass(cls):
for i in range(0, 3):
Group.objects.create(name='ViewTestCase%s' % i)
cls.request = test.RequestFactory().get('?q=foo')
super(Select2QuerySetSequenceViewTestCase, cls).setUpClass()
def get_view(self, **kwargs):
view = autocomplete.Select2QuerySetSequenceView(
queryset=autocomplete.QuerySetSequence(
Group.objects.all(),
),
paginate_by=2,
**kwargs
)
view.request = self.request
return view
def get_view_response(self, **view_kwargs):
return self.get_view(**view_kwargs).dispatch(self.request)
def get_view_response_json(self, **view_kwargs):
return json.loads(self.get_view_response(**view_kwargs).content)
def test_get(self):
result = self.get_view_response_json()
assert self.expected == result
def test_get_with_create_field(self):
self.expected['results'].append({
'text': 'Create "foo"',
'id': 'foo',
'create_id': True
})
result = self.get_view_response_json(create_field='name')
assert self.expected == result
| 2.421875 | 2 |
tests/test_cli_map.py | sjoerdk/anonapi | 0 | 12794100 | <gh_stars>0
from pathlib import Path
from unittest.mock import Mock
from click.testing import CliRunner
from pytest import fixture
from anonapi.cli import entrypoint
from anonapi.cli.map_commands import (
MapCommandContext,
activate,
add_accession_numbers,
add_selection,
delete,
edit,
find_dicom_files,
add_study_folders,
init,
status,
)
from anonapi.mapper import (
DEFAULT_MAPPING_NAME,
Mapping,
MappingFile,
MappingLoadError,
)
from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter
from anonapi.settings import DefaultAnonClientSettings
from tests.conftest import AnonAPIContextRunner, MockContextCliRunner
from tests import RESOURCE_PATH
MAPPER_RESOURCE_PATH = RESOURCE_PATH / "test_mapper"
class MappingContextRunner(AnonAPIContextRunner):
"""A click runner that always injects a MapCommandContext instance"""
def __init__(self, mock_context: MapCommandContext):
super().__init__(mock_context=mock_context)
def get_current_mapping(self) -> Mapping:
return self.mock_context.get_current_mapping()
@fixture
def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping):
context = mock_main_runner.get_context()
context.current_dir = lambda: NotImplementedError(
"Call settings.active_mapping_file instead"
)
context.settings.active_mapping_file = a_folder_with_mapping / "anon_mapping.csv"
return mock_main_runner
@fixture
def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext:
return MapCommandContext(
current_dir=a_folder_with_mapping,
settings=DefaultAnonClientSettings(
active_mapping_file=a_folder_with_mapping / "anon_mapping.csv"
),
)
@fixture
def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner:
"""A click CLIRunner with MapCommandContext that has a valid active mapping"""
return MappingContextRunner(mock_context=mock_map_context_with_mapping)
@fixture
def mock_map_context_without(tmpdir) -> MapCommandContext:
return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),)
@fixture
def runner_without_mapping(tmpdir):
"""A click CLIRunner that passes MapCommandContext without active mapping
(active mapping is None)
"""
return MockContextCliRunner(
mock_context=MapCommandContext(
current_dir=tmpdir, settings=DefaultAnonClientSettings()
)
)
def test_cli_map_add_selection(
runner_with_mapping, a_folder_with_mapping_and_fileselection
):
"""Add a file selection to a mapping."""
mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection
runner = runner_with_mapping
result = runner.invoke(
add_selection, str(fileselection_path), catch_exceptions=False
)
assert result.exit_code == 0
mapping = runner_with_mapping.mock_context.get_current_mapping()
assert len(mapping) == 21
assert "fileselection:a_folder/a_file_selection.txt" in "".join(
[str(x) for y in mapping.rows for x in y]
)
def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir):
result = mock_main_runner.invoke(entrypoint.cli, "map init", catch_exceptions=False)
with open(Path(tmpdir) / "anon_mapping.csv", "r") as f:
f.read()
assert result.exit_code == 0
def test_cli_map_init(mock_main_runner, tmpdir):
runner = mock_main_runner
# there should be no mapping to start with
assert (
"Could not find mapping"
in runner.invoke(entrypoint.cli, "map activate", catch_exceptions=False).output
)
# but after init there should be a valid mapping
runner.invoke(entrypoint.cli, "map init", catch_exceptions=False)
mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME
assert mapping_path.exists()
MappingFile(mapping_path).load_mapping() # should not crash
# and the created mapping should have been activated
assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path
def test_cli_map_info(mock_main_runner_with_mapping):
"""Running map info should give you a nice print of contents"""
context = mock_main_runner_with_mapping.get_context()
context.current_dir = RESOURCE_PATH / "test_cli"
runner = mock_main_runner_with_mapping
result = runner.invoke(entrypoint.cli, "map status", catch_exceptions=False)
assert result.exit_code == 0
assert "folder:folder/file4 patientName4" in result.output
def test_cli_map_info_empty_dir(mock_main_runner):
"""Running info on a directory not containing a mapping file should yield a
nice 'no mapping' message
"""
runner = mock_main_runner
result = runner.invoke(entrypoint.cli, "map status", catch_exceptions=False)
assert result.exit_code == 1
assert "No active mapping" in result.output
def test_cli_map_info_no_active_mapping(runner_without_mapping):
"""Running info on a directory not containing a mapping file should yield a
nice 'no mapping' message
"""
result = runner_without_mapping.invoke(status, catch_exceptions=False)
assert result.exit_code == 1
assert "No active mapping" in result.output
def test_cli_map_info_load_exception(mock_main_runner, monkeypatch):
"""Running info with a corrupt mapping file should yield a nice message"""
# make sure a valid mapping file is found
context = mock_main_runner.get_context()
context.settings.active_mapping_file = (
RESOURCE_PATH / "test_cli" / "anon_mapping.csv"
)
# but then raise exception when loading
def mock_load(x):
raise MappingLoadError("Test Exception")
monkeypatch.setattr("anonapi.mapper.JobParameterGrid.load", mock_load)
runner = CliRunner()
result = runner.invoke(entrypoint.cli, "map status", catch_exceptions=False)
assert result.exit_code == 1
assert "Test Exception" in result.output
def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files):
"""Add all dicom files in this folder to mapping"""
context = mock_map_context_without
runner = AnonAPIContextRunner(mock_context=context)
selection_folder = folder_with_some_dicom_files
# Add this folder to mapping
result = runner.invoke(
add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False,
)
# oh no! no mapping yet!
assert "No active mapping" in result.output
# make one
runner.invoke(init)
    # by default there are no rows in the mapping
assert len(context.get_current_mapping().grid) == 0
# No selection file has been put in the folder at this point
assert not selection_folder.has_file_selection()
# but after adding
result = runner.invoke(
add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False
)
# There should be a selection there
assert result.exit_code == 0
assert selection_folder.has_file_selection()
# also, this selection should have been added to the mapping:
mapping = context.get_current_mapping() # reload from disk
assert len(mapping.grid) == 1
added = ParameterSet(mapping.grid.rows[-1])
identifier = added.get_param_by_type(SourceIdentifierParameter)
# and the identifier should be a FileSelectionIdentifier which is
# relative to the current path
assert not identifier.path.is_absolute()
def test_cli_map_add_folder_no_check(
mock_map_context_without, folder_with_some_dicom_files
):
"""Add all dicom files in this folder to mapping but do not scan"""
context = mock_map_context_without
runner = AnonAPIContextRunner(mock_context=context)
selection_folder = folder_with_some_dicom_files
runner.invoke(init)
# by default there are no rows
assert len(context.get_current_mapping().grid) == 0
    # no dicom files should have been selected at this point
assert not selection_folder.has_file_selection()
# but after adding
result = runner.invoke(
add_study_folders,
args=["--no-check-dicom", str(selection_folder.path)],
catch_exceptions=False,
)
# There should be a selection there
assert result.exit_code == 0
assert selection_folder.has_file_selection()
assert "that look like DICOM" in result.output
@fixture
def create_fileselection_click_recorder(monkeypatch):
"""Add a decorator around the function that adds paths to mapping. Function
will still works as normal, but calls are recorded
"""
recorder = Mock()
def find_dicom_files_recorded(*args, **kwargs):
"""Run the original function, but track calls"""
recorder(*args, **kwargs)
return find_dicom_files(*args, **kwargs)
monkeypatch.setattr(
"anonapi.cli.map_commands.find_dicom_files", find_dicom_files_recorded,
)
return recorder
def test_cli_map_add_study_folders(
runner_with_mapping,
folder_with_mapping_and_some_dicom_files,
create_fileselection_click_recorder,
monkeypatch,
):
"""Add multiple study folders using the add-study-folders command"""
context: MapCommandContext = runner_with_mapping.mock_context
context.current_dir = folder_with_mapping_and_some_dicom_files.path
monkeypatch.setattr(
"os.getcwd", lambda: str(folder_with_mapping_and_some_dicom_files.path)
)
result = runner_with_mapping.invoke(
add_study_folders, args=["--no-check-dicom", "*"], catch_exceptions=False,
)
assert create_fileselection_click_recorder.call_count == 2
assert "that look like DICOM" in result.output
def test_cli_map_delete(mock_map_context_with_mapping):
"""Running map info should give you a nice print of contents"""
context = mock_map_context_with_mapping
runner = AnonAPIContextRunner(mock_context=context)
assert context.settings.active_mapping_file.exists()
result = runner.invoke(delete, catch_exceptions=False)
assert result.exit_code == 0
assert not context.settings.active_mapping_file.exists()
# deleting again will yield nice message
result = runner.invoke(delete)
assert result.exit_code == 1
assert "No such file or directory" in result.output
def test_cli_map_edit(mock_map_context_with_mapping, mock_launch):
context = mock_map_context_with_mapping
runner = AnonAPIContextRunner(mock_context=context)
result = runner.invoke(edit, catch_exceptions=False)
assert result.exit_code == 0
assert mock_launch.called
# now try edit without any mapping being present
mock_launch.reset_mock()
runner.invoke(delete)
result = runner.invoke(edit)
assert "No mapping file found at" in result.output
assert not mock_launch.called
def test_cli_map_activate(mock_map_context_with_mapping):
context = mock_map_context_with_mapping
runner = AnonAPIContextRunner(mock_context=context)
settings = context.settings
settings.active_mapping_file = None # we start with a mapping file, but no active
# after activating, active mapping should be set
runner.invoke(activate)
assert settings.active_mapping_file == context.current_dir / "anon_mapping.csv"
# Graceful error when activating when there is no mapping in current dir
runner.invoke(delete)
assert "Could not find mapping file at" in runner.invoke(activate).output
def test_cli_map_add_paths_file(
mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch
):
"""Add an xls file containing several paths and potentially pseudonyms
to an existing mapping
"""
context = mock_map_context_with_mapping
runner = AnonAPIContextRunner(mock_context=context)
# assert mapping is as expected
mapping = context.get_current_mapping()
assert len(mapping.grid) == 20
# now try to add something from the directory with some dicom files
context.current_dir = folder_with_some_dicom_files.path
monkeypatch.setattr("os.getcwd", lambda: folder_with_some_dicom_files.path)
folders = [
x for x in folder_with_some_dicom_files.path.glob("*") if not x.is_file()
]
# First run with regular command line input
result = runner.invoke(
add_study_folders, args=[str(folders[0])], catch_exceptions=False
)
assert result.exit_code == 0
# Then run with input file input (input file contains 2 folders + names)
input_file_path = MAPPER_RESOURCE_PATH / "inputfile" / "some_folder_names.xlsx"
result = runner.invoke(
add_study_folders, args=["-f", str(input_file_path)], catch_exceptions=False
)
assert result.exit_code == 0
# now three rows should have been added
added = context.get_current_mapping().grid.rows[20:]
assert len(added) == 3
# and the pseudo names from the input file should have been included
pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added]
assert pseudo_names[1].value == "studyA"
assert pseudo_names[2].value == "studyB"
def test_cli_map_add_accession_numbers(runner_with_mapping):
"""Add some accession numbers to a mapping"""
result = runner_with_mapping.invoke(add_accession_numbers, ["12344556.12342345"])
assert result.exit_code == 0
mapping = runner_with_mapping.get_current_mapping()
# TODO: make accessing a specific parameter in a row easier. Not like below.
assert (
ParameterSet(mapping.rows[-1]).as_dict()["source"].value.identifier
== "12344556.12342345"
)
def test_cli_map_add_accession_numbers_file(runner_with_mapping):
"""Add some accession numbers to a mapping"""
input_file_path = MAPPER_RESOURCE_PATH / "inputfile" / "some_accession_numbers.xlsx"
result = runner_with_mapping.invoke(
add_accession_numbers, ["--input-file", str(input_file_path)]
)
assert result.exit_code == 0
mapping = runner_with_mapping.get_current_mapping()
assert (
ParameterSet(mapping.rows[-1]).as_dict()["source"].value.identifier
== "123456.12321313"
)
assert ParameterSet(mapping.rows[-1]).as_dict()["pseudo_name"].value == "study3"
| 2.125 | 2 |
Sets/set .add().py | AndreasGeiger/hackerrank-python | 0 | 12794101 | <filename>Sets/set .add().py
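# HackerRank "Set .add()": count distinct country stamps by reading N lines
# into a set and printing its size.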
amountInputs = int(input())
countryList = set()
for i in range(amountInputs):
countryList.add(input())
print(len(countryList))
| 3.4375 | 3 |
iauth/urls.py | rajmani1995/picfilter | 0 | 12794102 | '''
Authentication urls for ToDos Users
Author: <NAME>
'''
from django.conf.urls import url
from . import views
# Authentication urls
urlpatterns = [
url(r'^login/', views._login),
url(r'^signup/', views._register),
url(r'^change_password/', views._changePassword),
url(r'^logout/', views._logout),
url(r'^upload/', views._upload),
url(r'^profile/', views._profile),
# url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\w]+)', views._activate),
# url(r'^resend_activation_email/(?P<id>[0-9]+)', views.resend_activation_email),
] | 2.09375 | 2 |
geneeval/fetcher/auto_fetcher.py | BaderLab/GeneEval | 3 | 12794103 | from typing import List
from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher
class AutoFetcher:
"""A factory function which returns the correct data fetcher for the given `tasks`.
A `Fetcher` is returned which requests all the data relevant to `tasks` in a single call to its
`fetch` method. This ensures the API endpoints are only queried once, rather than for every
task individually.
NOTE: It is assumed that a `benchmark.json` file already exists, with at least the gene IDs
present. This file can be created by running the `get_protein_ids.py` file in `scripts`.
"""
def __new__(cls, tasks: List[str]) -> Fetcher:
fetcher = Fetcher()
uniprot_fetcher = UniprotFetcher()
for task in tasks:
if task.startswith("sequence"):
uniprot_fetcher.register(SequenceFetcher)
if task.startswith("subcellular_localization"):
uniprot_fetcher.register(LocalizationFetcher)
fetcher.register(uniprot_fetcher)
return fetcher
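# Usage sketch (task names are illustrative, not confirmed by this module):
#   fetcher = AutoFetcher(["sequence_similarity", "subcellular_localization"])
#   fetcher.fetch()  # queries each registered endpoint once for all tasks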
| 3.1875 | 3 |
lib/__init__.py | caiodearaujo/azure-cosmosdb-python | 0 | 12794104 | import os, json
from typing import Dict, Iterable
from azure.cosmos import (CosmosClient,
PartitionKey,
ContainerProxy,
DatabaseProxy)
SETTINGS = dict(
HOST = os.getenv('COSMOSDB_HOST'),
MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'),
DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID')
)
class DataBaseClient():
def __init__(self, container_id, partition_key) -> None:
super().__init__()
self.container_id = container_id
self.partition_key = partition_key
def get_cosmosdb_client(self) -> CosmosClient:
        # assuming the azure-cosmos v4 SDK implied by the PartitionKey/Proxy
        # imports above, which takes the endpoint URL and key directly
        client = CosmosClient(
            url=SETTINGS['HOST'],
            credential=SETTINGS['MASTER_KEY']
        )
return client
def get_cosmosdb_database(self) -> DatabaseProxy:
client = self.get_cosmosdb_client()
database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID'])
return database
def get_cosmosdb_container(self) -> ContainerProxy:
database = self.get_cosmosdb_database()
container = database.create_container_if_not_exists(
id=self.container_id,
partition_key=PartitionKey(path=self.partition_key)
)
return container
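    # Usage sketch (values are illustrative):
    #   db = DataBaseClient(container_id='items', partition_key='/id')
    #   db.upsert_item_cosmosdb({'id': '1', 'name': 'example'})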
def create_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.create_item(item)
return item
def upsert_item_cosmosdb(self, item: Dict) -> Dict:
container = self.get_cosmosdb_container()
item = container.upsert_item(item)
return item
    def _partition_key_value(self, item: Dict):
        # assumes a single-level partition key path such as '/id'
        return item[self.partition_key.lstrip('/')]
    def delete_item_cosmosdb(self, item: Dict) -> None:
        container = self.get_cosmosdb_container()
        # the v4 SDK requires the partition key alongside the item
        return container.delete_item(item, partition_key=self._partition_key_value(item))
    def get_item_cosmosdb(self, item: Dict) -> Dict:
        container = self.get_cosmosdb_container()
        return container.read_item(item, partition_key=self._partition_key_value(item))
def query_items_cosmosdb(self, query: str) -> Iterable:
container = self.get_cosmosdb_container()
items = container.query_items(query, enable_cross_partition_query=True)
return items | 2.3125 | 2 |
autodiff/node.py | teamjel/cs207-FinalProject | 0 | 12794105 | <filename>autodiff/node.py
"""
Node Logic for Automatic Differentiation
"""
from functools import wraps
import numpy as np
import numbers
from .visualization import create_computational_graph, create_computational_table
from .settings import settings
"""
Custom exceptions.
"""
class NoValueError(Exception):
pass
class node_decorate():
""" Decorator for computation functions.
Implemented as a class for clarity and to serve
as a decorator factory.
Note: the class implementation of decorators behaves
very differently in the case the decorator pattern takes
arguments (__call__ is called only once at decoration,
since we have another function layer outside now).
"""
def __init__(self, mode):
# Maintain function metadata (doctstrings, etc.) with wraps
self.factory = {'evaluate': self.eval_wrapper,
'differentiate': self.diff_wrapper,
'reverse': self.reverse_wrapper}
self.wrapper = self.factory[mode]
def __call__(self, fn):
return self.wrapper(fn)
def eval_wrapper(self, fn):
""" Wrapper for updating node values. """
@wraps(fn)
def wrapper(self):
values = [child.eval() for child in self.children]
result = fn(self, values)
self.set_value(result)
return result
return wrapper
def diff_wrapper(self, fn):
""" Wrapper for updating node derivatives. """
@wraps(fn)
def wrapper(self):
values = [child.eval() for child in self.children]
diffs = [child.diff() for child in self.children]
result = fn(self, values, diffs)
self.set_derivative(result)
return result
return wrapper
def reverse_wrapper(self, fn):
""" Wrapper for updating gradients in reverse pass. """
@wraps(fn)
def wrapper(self):
# Check that we've received all the dependencies we need
if not self.ready_to_reverse():
return
# We need to have done first sweep before reverse, assume values exist
values = [child.value() for child in self.children]
grad_value = self._grad_value
results = fn(self, values, grad_value)
# Need to propagate results (functions need to return same # of results as children)
for idx in range(len(results)):
self.children[idx].add_grad_contribution(results[idx])
self.children[idx].reverse()
return results
return wrapper
class Node():
""" Class Node
Base Node implementation.
"""
def __init__(self):
self._value = None
self._derivative = {}
self._variables = {}
self._cur_var = None
self.children = []
# Name of type of node
self.type = 'None'
# Reverse mode
self._grad_value = 0
self._cur_grad_count = 0
self._grad_count = 0
@classmethod
def make_constant(cls, value):
return Constant(value)
@classmethod
def make_node(cls, node, *values):
new_nodes = []
for value in values:
new = value
if not isinstance(new, Node):
new = cls.make_constant(value)
new_nodes.append(new)
node.set_children(*new_nodes)
node.update_variables()
return node
""" MAGIC
Various implementations to improve the interface
of the package, from calling nodes directly to compute
to treating them as one would expect in symbolic computation.
"""
def __call__(self, *args, **kwargs):
return self.compute(*args, **kwargs)
def __repr__(self):
output = 'Node(Function = %r, Value = %r, Derivative = %r)' % (self.type, self.value(), self.derivative())
return output
def __add__(self, value):
node = self.make_node(Addition(), self, value)
return node
def __radd__(self, value):
node = self.make_node(Addition(), value, self)
return node
def __neg__(self):
node = self.make_node(Negation(), self)
return node
def __sub__(self, value):
node = self.make_node(Subtraction(), self, value)
return node
def __rsub__(self, value):
node = self.make_node(Subtraction(), value, self)
return node
def __mul__(self, value):
node = self.make_node(Multiplication(), self, value)
return node
def __rmul__(self, value):
node = self.make_node(Multiplication(), value, self)
return node
def __truediv__(self, value):
node = self.make_node(Division(), self, value)
return node
def __rtruediv__(self, value):
node = self.make_node(Division(), value, self)
return node
def __pow__(self, value):
node = self.make_node(Power(), self, value)
return node
def __rpow__(self, value):
node = self.make_node(Power(), value, self)
return node
def __eq__(self,other):
return self.value() == other.value() and self.derivative() == other.derivative()
def __ne__(self, other):
return not self == other
def __hash__(self):
return id(self)
""" ATTRIBUTES
Methods for setting and getting attributes.
"""
def value(self):
return self._value
def derivative(self):
return self._derivative
def set_value(self, value):
if not isinstance(value, (numbers.Number, np.ndarray)):
raise TypeError('Value must be numeric or a numpy array.')
self._value = value
def set_derivative(self, value):
var = self.update_cur_var()
if isinstance(value, numbers.Number):
self._derivative[self._cur_var] = value
else:
# if self._cur_var not in self._derivative:
# self._derivative[self._cur_var] = np.zeros(value.size)
self._derivative[self._cur_var][var.var_idx] = value[var.var_idx]
def set_children(self, *children):
self.children = children
""" VARIABLES
Methods for handling variables, the basic
stores for actually computing the values and
derivatives of any given node.
"""
def update_variables(self):
""" Update current variable list to reflect all variables
necessary in children.
"""
new_vars = []
for child in self.children:
if isinstance(child, Variable):
new_vars.append(child)
else:
new_vars.extend(child._variables.values())
variables = list(set(new_vars))
variable_names = [var.name for var in variables]
self._variables = dict(zip(variable_names, variables))
def set_variables(self, input_dict):
""" Set variables for evaluation. """
for key, value in input_dict.items():
self._variables[key].set_value(value)
# if isinstance(value, np.ndarray):
# self._derivative[key] = np.zeros(value.size)
self.zero_vector_derivative(input_dict)
def zero_vector_derivative(self, input_dict):
""" Reset vectors of derivatives recursively in children """
if type(self) != Variable:
for key, value in input_dict.items():
if isinstance(value, np.ndarray) and key in self._variables:
self._derivative[key] = np.zeros(value.size)
for node in self.children:
node.zero_vector_derivative(input_dict)
def update_cur_var(self):
for v in self._variables:
if np.any(self._variables[v].derivative()):
self._cur_var = v
return self._variables[v]
def iterate_seeds(self):
""" Generator to iterate over all variables of this
node, which assign seed values to variables to compute
all partials.
"""
for var in self._variables:
# Reset derivatives
for v in self._variables:
self._variables[v].set_derivative(0)
if isinstance(self._variables[var].value(), np.ndarray):
for idx in self._variables[var].iterate_idxs():
yield idx
else:
self._variables[var].set_derivative(1)
yield var
""" REVERSE MODE
Helper functions for properly doing the reverse mode
of automatic differentiation. These include keeping track
of whether or not any node is ready to compute its contributions
to its children, and managing these contributions.
"""
def zero_grad_values(self):
""" Reset all partial contributions for reverse pass """
self._grad_value = 0
self._cur_grad_count = 0
self._grad_count = 0
for child in self.children:
child.zero_grad_values()
def set_grad_count(self):
""" Calculate dependency counts """
self._grad_count += 1
for child in self.children:
child.set_grad_count()
def ready_to_reverse(self):
return (self._cur_grad_count == self._grad_count)
def add_grad_contribution(self, value):
# Keep track of addition contribution
self._cur_grad_count += 1
self._grad_value += value
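    # Reverse-mode bookkeeping: a node only propagates (see the reverse
    # wrappers) once every parent has pushed its chain-rule contribution,
    # so each node fires exactly once per backward sweep.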
""" COMPUTATION
Actual computation functions, with eval and diff
to be implemented by subclasses. Use the node_decorate
decorator to update node values upon computation.
"""
def compute(self, *args, **kwargs):
""" Evaluate and differentiate at the given variable values.
Inputs methods:
-Dictionary of {variable_name: value, ...}
-Keyword arguments of compute(variable_vame=value, ...)
"""
if len(args) == 0:
input_dict = kwargs
elif len(args) == 1:
input_dict = args[0]
if input_dict.keys() != self._variables.keys():
raise TypeError('Input not recognized.')
# Compute the value at this node
self.set_variables(input_dict)
self.eval()
# Compute derivatives based on mode
if settings.current_mode() == "forward":
for var in self.iterate_seeds():
self.diff()
else:
# Reverse mode
self.zero_grad_values()
# Get proper contribution counts
self.set_grad_count()
# Seeding output, current node by 1
self.add_grad_contribution(1)
self.reverse()
# Now set the results
self._derivative = {}
for key, var in self._variables.items():
self._derivative[key] = var._grad_value
return self
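    # Usage sketch (illustrative):
    #   x, y = Variable('x'), Variable('y')
    #   f = x * y + x ** 2
    #   f.compute(x=3.0, y=2.0)
    #   f.value()       # 15.0
    #   f.derivative()  # {'x': 8.0, 'y': 3.0}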
# Uncomment when overriding:
# @node_decorate('evaluate')
def eval(self, values):
raise NotImplementedError
# Uncomment when overriding:
# @node_decorate('differentiate')
def diff(self, values, diffs):
raise NotImplementedError
# Uncomment when overriding:
# @node_decorate('reverse')
def reverse(self, values, grad_value):
raise NotImplementedError
def get_comp_graph(self):
""" Creates a computational graph for a given node. """
return create_computational_graph(self)
def get_comp_table(self):
""" Creates a computational table for a given node. """
return create_computational_table(self)
""" SUBCLASSES
Node subclasses that define operations or single
values, such as variables and constants.
"""
class Variable(Node):
""" Node representing a symbolic variable.
Serves as the basis of evaluation, and then
propagates values through the graph
to the final output values.
"""
def __init__(self, name=None):
super().__init__()
if name is None or not isinstance(name, str):
raise ValueError('Name must be given for variable.')
self.name = name
self.type = 'Variable'
self._variables[name] = self
self.var_idx = -1
def eval(self):
if self.value() is None:
raise NoValueError('Variable %s has been given no value.' % self.name)
return self.value()
def diff(self):
if self.derivative() is None:
raise NoValueError('Variable %s has been given no value.' % self.name)
return self.derivative()
# Override dict functionality for variables; I could keep this
# consistent, but would increase computation; elegance tradeoff
def set_derivative(self, value):
if isinstance(self.value(), np.ndarray):
self._derivative[:] = value
else:
self._derivative = value
# On value set, needs to set the derivative
def set_value(self, value):
self._value = None
if isinstance(value, np.ndarray):
self.set_derivative(np.zeros(value.size))
super().set_value(value)
# Iterate over each vector position
def iterate_idxs(self):
for i in range(self._value.size):
self.set_derivative(0)
self.var_idx = i
self._derivative[i] = 1
yield i
# # Override calling the variable
def compute(self, *args, **kwargs):
if len(args) == 0:
input_dict = kwargs
elif len(args) == 1:
input_dict = args[0]
if self.name not in input_dict:
raise TypeError('Input not recognized.')
self.set_value(input_dict[self.name])
        self.set_derivative(1)
return self
def __call__(self, *args, **kwargs):
return self.compute(*args, **kwargs)
# Reverse mode doesn't need to do anything, no children
@node_decorate('reverse')
def reverse(self, values, grad_value):
return ()
class Constant(Node):
""" Node representing a constant.
Always initiated with 0 derivative.
"""
def __init__(self, value):
super().__init__()
self.set_value(value)
self.set_derivative(0)
self.type = 'Constant'
def set_derivative(self, value):
self._derivative = value
def eval(self):
return self.value()
def diff(self):
return self.derivative()
# Reverse mode doesn't need to do anything, no children
@node_decorate('reverse')
def reverse(self, values, grad_value):
return ()
class Addition(Node):
def __init__(self):
super().__init__()
self.type = 'Addition'
@node_decorate('evaluate')
def eval(self, values):
left, right = values
return np.add(left, right)
@node_decorate('differentiate')
def diff(self, values, diffs):
left, right = diffs
return np.add(left, right)
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
return (grad_value, grad_value)
class Negation(Node):
def __init__(self):
super().__init__()
self.type = 'Negation'
@node_decorate('evaluate')
def eval(self, values):
return -1*np.array(values[0])
@node_decorate('differentiate')
def diff(self, values, diffs):
return -1*np.array(diffs[0])
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
return (-1*np.array(grad_value),)
class Subtraction(Node):
def __init__(self):
super().__init__()
self.type = 'Subtraction'
@node_decorate('evaluate')
def eval(self, values):
# values vector respects order
return np.subtract(values[0], values[1])
@node_decorate('differentiate')
def diff(self, values, diffs):
return np.subtract(diffs[0], diffs[1])
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
return (grad_value, -grad_value)
class Multiplication(Node):
def __init__(self):
super().__init__()
self.type = 'Multiplication'
@node_decorate('evaluate')
def eval(self, values):
return np.multiply(values[0], values[1])
@node_decorate('differentiate')
def diff(self, values, diffs):
return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0])
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
left, right = values
left_out = np.multiply(right, grad_value)
right_out = np.multiply(left, grad_value)
return (left_out, right_out)
class Division(Node):
def __init__(self):
super().__init__()
self.type = 'Division'
@node_decorate('evaluate')
def eval(self, values):
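        # NOTE: this truthiness check is only valid for scalar (or size-1)
        # denominators; element-wise zeros in a larger numpy array are not
        # caught here.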
if values[1] == 0:
raise ZeroDivisionError('Division by zero.')
return np.divide(values[0], values[1])
@node_decorate('differentiate')
def diff(self, values, diffs):
num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1])
denom = np.array(values[1])**2
if denom == 0:
raise ZeroDivisionError('Division by zero.')
return np.divide(num, denom)
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
numer, denom = values
numer_out = np.divide(grad_value, denom)
denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2))
return (numer_out, denom_out)
class Power(Node):
def __init__(self):
super().__init__()
self.type = 'Power'
@node_decorate('evaluate')
def eval(self, values):
base, exp = values
return np.power(base, exp)
@node_decorate('differentiate')
def diff(self, values, diffs):
base, exp = values
b_prime, exp_prime = diffs
# First term
coef = np.multiply(exp, b_prime)
powered = np.power(base, np.subtract(exp, 1))
term1 = np.multiply(coef, powered)
# Second term
term2 = 0
# if exp_prime != 0:
# Compute only if necessary, otherwise we run into log(-c) issues
temp_base = np.copy(base)
temp_base[temp_base<=0] = 1
coef = np.multiply(np.log(temp_base), exp_prime)
powered = np.power(base, exp)
term2 = np.multiply(coef, powered)
return term1+term2
# Reverse mode
@node_decorate('reverse')
def reverse(self, values, grad_value):
base, exp = values
base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value)
exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)), grad_value)
return (base_out, exp_out) | 2.859375 | 3 |
landmark/test.py | reasonsolo/mtcnn_caffe | 0 | 12794106 | import os
import sys
import cv2
import time
import caffe
import numpy as np
import config
sys.path.append('../')
from fast_mtcnn import fast_mtcnn
from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72
from baidu import call_baidu_api
def create_net(model_dir, iter_num):
model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)
proto_path = 'landmark.prototxt'
return caffe.Net(proto_path, model_path, caffe.TEST)
if __name__ == '__main__':
iter_num = int(sys.argv[1])
img_path = sys.argv[2]
model_dir = config.MODEL_DIR
if len(sys.argv) > 3:
model_dir = sys.argv[3]
img = cv2.imread(img_path)
net = create_net(model_dir, iter_num)
mtcnn = fast_mtcnn()
boxes = mtcnn(img_path)
for box in boxes:
if not is_valid_facebox(box):
continue
exp_box = expand_mtcnn_box(img, box)
cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]]
baidu_result = call_baidu_api(cropped, '')
baidu_lm = extract_baidu_lm72(baidu_result[0][-1])
for x, y in baidu_lm:
x = int(x + exp_box[0])
y = int(y + exp_box[1])
cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1)
h, w, _ = cropped.shape
cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE))
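        # Caffe expects channel-first blobs; since the crop is square,
        # swapaxes(0, 2) (HxWxC -> CxWxH) suffices, followed by [-1, 1] scaling.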
cropped = np.swapaxes(cropped, 0, 2)
cropped = (cropped - 127.5) / 127.5
net.blobs['data'].data[0] = cropped
out = net.forward()
landmark = out['Dense2'][0]
for pt in landmark.reshape((config.LANDMARK_SIZE, 2)):
x, y = pt
x = x * w + exp_box[0]
y = y * h + exp_box[1]
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1)
time.sleep(0.5)
cv2.imwrite('result.jpg', img)
| 2.203125 | 2 |
patch_tracking/util/upstream/__init__.py | openeuler-mirror/patch-tracking | 0 | 12794107 | <reponame>openeuler-mirror/patch-tracking
"""upstream init"""
import patch_tracking.util.upstream.git as git
import patch_tracking.util.upstream.github as github
class Factory(object):
"""
Factory
"""
@staticmethod
def create(track):
"""
git type
"""
if track.version_control == 'github':
return github.GitHub(track)
if track.version_control == 'git':
return git.Git(track)
return None
| 1.796875 | 2 |
pcdet/models/model_utils/basic_blocks.py | xiangruhuang/OpenPCDet | 0 | 12794108 | <filename>pcdet/models/model_utils/basic_blocks.py
from torch import nn
def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True):
return nn.Sequential(
*[
nn.Sequential(
nn.Linear(channels[i - 1], channels[i], bias=bias),
nn.BatchNorm1d(channels[i], momentum=bn_momentum),
activation,
)
for i in range(1, len(channels))
]
)
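# Example: MLP([64, 128, 256]) builds Linear(64, 128) -> BatchNorm1d -> LeakyReLU
# followed by Linear(128, 256) -> BatchNorm1d -> LeakyReLU.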
| 2.59375 | 3 |
typing_game/api/mixins/colors.py | CarsonSlovoka/typing-game | 0 | 12794109 | <filename>typing_game/api/mixins/colors.py<gh_stars>0
from typing_game.api.generics import RGBColor
class TypingGameColorMixin:
""" Settings of colors."""
__slots__ = ()
TYPING_CORRECT_COLOR = RGBColor.GREEN
TYPING_CUR_POS_COLOR = RGBColor.BLUE
TYPING_MODIFY_COLOR = RGBColor.YELLOW
TYPING_ERROR_COLOR = RGBColor.RED
| 2.15625 | 2 |
import-feeds.py | gleitz/iron-blogger | 3 | 12794110 | #!/usr/bin/python
from lxml import html
import yaml
import sys
import urllib2
import urlparse
from datetime import datetime
print 'Import feeds at ' + str(datetime.now())
HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'}
with open('bloggers.yml') as f:
users = yaml.safe_load(f.read())
def fetch_links(url):
req = urllib2.Request(url, headers=HEADERS)
tree = html.fromstring(urllib2.urlopen(req).read())
links = tree.xpath(
'//link[@rel="alternate"][contains(@type, "rss") or ' +
'contains(@type, "atom") or contains(@type, "rdf")]')
candidates = [l for l in links if
'atom' in l.attrib['type'] and
'comments' not in l.attrib['href'].lower() and
'comments' not in l.attrib.get('title','')]
if candidates:
return candidates[0].attrib['href']
elif links:
return links[0].attrib['href']
else:
print >>sys.stderr, "No link found for %s" % (url,)
return None
for (name, u) in users.items():
for e in u['links']:
(title, url) = e[0:2]
e[0] = e[0].strip()
if len(e) == 3:
continue
link = fetch_links(url)
if link:
if not link.startswith('http:'):
link = urlparse.urljoin(url, link)
e.append(link)
with open('bloggers.yml', 'w') as f:
yaml.safe_dump(users, f)
| 2.8125 | 3 |
textclassification/randomforest.py | sshekhar10/mymllearnings | 0 | 12794111 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 17:37:56 2018
@author: sshekhar
"""
# get some libraries that will be useful
import re
import string
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestClassifier
# function to split the data for cross-validation
from sklearn.model_selection import train_test_split
# function for transforming documents into counts
from sklearn.feature_extraction.text import CountVectorizer
# function for encoding categories
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
#Function to normalize the text
def normalize_text(s):
#lower-case the text
s = s.lower()
# remove punctuation that is not word-internal (e.g., hyphens, apostrophes)
for ch in string.punctuation:
s = s.replace(ch, " ")
# make sure we didn't introduce any double spaces
s = re.sub('\s+',' ',s)
s = re.sub("[0-9]+", "||DIG||",s)
s = re.sub(' +',' ', s)
return s
#Function to predict the category for a given title
def predict_cat(title):
    # normalize the title the same way the training titles were normalized
    title = normalize_text(title)
cat_names = {'b' : 'business', 't' : 'science and technology', 'e' : 'entertainment', 'm' : 'health'}
clf_pred = clf.predict(vectorizer.transform([title]))
return cat_names[encoder.inverse_transform(clf_pred)[0]]
news = pd.read_csv("data/uci-news-aggregator.csv")
# let's take a look at our data
#Normalize the title
news['TEXT'] = [normalize_text(s) for s in news['TITLE']]
news['CATEGORY'].unique()
# pull the data into vectors
vectorizer = CountVectorizer()
x = vectorizer.fit_transform(news['TEXT'])
encoder = LabelEncoder()
y = encoder.fit_transform(news['CATEGORY'])
# split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Instantiate the classifier: clf
clf = RandomForestClassifier()
# Fit the classifier to the training data
clf.fit(x_train, y_train)
# Print the accuracy
print("Accuracy: {}".format(clf.score(x_test, y_test)))
x_test_clv_pred = clf.predict(x_test)
confusion_matrix(y_test, x_test_clv_pred)
print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_))
randomtitle="vehicular pollution - a big hazard for children"
print(predict_cat(randomtitle))
| 2.84375 | 3 |
Code/Blogs_Preprocessing_II.py | MaLuHart/Blogpost-Classification | 0 | 12794112 | <reponame>MaLuHart/Blogpost-Classification
# coding: utf-8
# # Preprocessing of the texts
#
# Author: <NAME>
# In[1]:
# Imports
import os
import numpy as np
import re # module for regular expression operations
import csv # module for csv output
from sklearn.model_selection import train_test_split # module to split data into train and test sets
import matplotlib.pyplot as plt # module for visualization
from wordcloud import WordCloud # module for wordclouds
# In[2]:
# Class for accessing and preprocessing the data
folder = '../Preprocessing'
datasets = '../Datasets'
file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text
if not os.path.exists(datasets):
os.makedirs(datasets)
class MyCorpus(object):
""" Preprocessing des Korpus """
# file: input data
# def __init__(self, file, x, y):
def __init__(self, file):
self.file = file
    # intended to be memory friendly; note that readlines() below still loads the whole file
def __iter__(self):
openfile = open(self.file, 'r', encoding='utf-8')
        # save each document as one item of a list (one document = filename, label list + text)
documents = openfile.readlines()
openfile.close()
texts = self.split_csv(documents)
for text in texts:
#print("\n text in iter:", text)
yield text
# preprocessing
#==========================
# convert text to lower-case, remove punctuation and stopwords
def normalize_text(self, text):
"""Bereinigt den Text:
- transformiert alles in Kleinschrift
- löscht Satz- und Sonderzeichen"""
norm_text = text.lower()
# remove punctuation
for char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'.', ',', ';', ':', '!', '?', '…','·', '·',
'"', '„', '“', '”', "´", "`", "’", "‘", "‚","'",
'(', ')', '[', ']', '{', '}',
'/', '\\', '|', '_', '-', '–', '—',
'', '', '', '■', '•',
'§', '$', '@', '€', '&', '%', '&', '+', '*', '=', '#',
'«', '»', '≥', '<', '>', '^']:
norm_text = norm_text.replace(char, ' ')
tokens = norm_text.split()
vocab = {}
for word in tokens:
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
""" # read stopwords
words = []
stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read()
for token in tokens:
#if token in stopWords:
if token in stopwords:
continue
else:
words.append(token)"""
#return words, vocab
#return norm_text, vocab
return tokens, vocab
    # split filename, label list and text and save them in lists
    def split_csv(self, documents):
        """Splits each line of the input document into the three lists
        1) filenames
        2) labels
        3) text """
ident = []
label = []
text = []
vocabulary = {}
# first row is headline
for i, document in enumerate(documents[1:]):
tmp_ident = document.split(";", 1)[0]
#print("tmp_ident:", tmp_ident)
tmp_label = []
if re.match("aes_", tmp_ident): # Blog "aes - <NAME>" hat nur Thèmes: Histoire, Religions
tmp_label3 = document.split(";", 2)[1].strip()
tmp_label3 = tmp_label3.lower().replace('"', '').strip().split(",")
tmp_label3 = [x.strip()+'_t' for x in tmp_label3]
tmp_label.extend(tmp_label3)
#print("Sonderfall:", tmp_ident, tmp_label)
tmp_text, vocab = self.normalize_text(document.split(";", 3)[2])
#tmp_text = document.split(";", 3)[2]
#print("tmp_text:", tmp_text)
else:
tmp_label1 = document.split(";", 2)[1].strip()
#print("tmp_label1:", tmp_label1)
tmp_label2 = document.split(";", 3)[2].strip()
#print("tmp_label2:", tmp_label2)
tmp_text, vocab = self.normalize_text(document.split(";", 4)[3])
#tmp_text = document.split(";", 4)[3].strip()
#print("tmp_text:", tmp_text)
tmp_label1 = tmp_label1.lower().replace('"', '').strip().split(",")
tmp_label1 = [x.strip()+'_d' for x in tmp_label1]
tmp_label.extend(tmp_label1)
tmp_label2 = tmp_label2.lower().replace('"', '').strip().split(",")
tmp_label2 = [x.strip()+'_t' for x in tmp_label2]
tmp_label.extend(tmp_label2)
#tmp_label = (tmp_label1 + tmp_label2)
#print(tmp_label)
tmp_label = [x.strip() for x in tmp_label]
ident.append(tmp_ident)
label.append(tmp_label)
text.append(tmp_text)
for key, value in vocab.items():
if key in vocabulary:
vocabulary[key] += value
else:
vocabulary[key] = value
return ident, label, text, vocabulary
# In[3]:
# get corpus from disk
identset, labelset, textset, vocab = MyCorpus(file)
# save vocabulary to file
with open(folder+'/blogs_vocabulary.txt',"w", encoding="utf8") as v:
for key, value in sorted(vocab.items()):
v.write("%s : %s\n" % (key, value))
# In[4]:
print(identset[1000])
print(labelset[1000])
print(textset[1000])
print(len(identset))
print(len(labelset))
print(len(textset))
# In[5]:
def get_label_dic(y):
"""Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label
(Label : Anzahl der zugehörigen Blogbeiträge)"""
labelcount_dic = {}
#tmp_label = ", "
for label in y:
for l in label:
if l in labelcount_dic:
labelcount_dic[l] += 1
else:
labelcount_dic[l] = 1
return labelcount_dic
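# Example: get_label_dic([["histoire_t"], ["histoire_t", "religions_t"]])
#          returns {"histoire_t": 2, "religions_t": 1}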
# In[6]:
# removes small classes (<100)
def remove_small_classes(labelset, label_dic):
    """Removes the classes that have fewer than 100 blog posts assigned"""
small_classes = []
reduced_labelset = []
for key, value in label_dic.items():
if value < 100:
#print(\"%s : %s\" % (key, value))
small_classes.append(key)
for elements in labelset:
tmp_labels = []
for element in elements:
if element in small_classes:
continue
else:
tmp_labels.append(element)
reduced_labelset.append(tmp_labels)
return reduced_labelset, small_classes
# # Building the all_labels corpus
# In[7]:
# all_labels dic
all_labels_dic = get_label_dic(labelset)
print("Klassen insgesammt:", len(all_labels_dic))
#print(all_labels_dic)
count = 0
classes = list(all_labels_dic)
# writes the number of blog posts per label to a txt file
# (all_labels not reduced)
with open(folder+'/blogposts_per_all_labels.txt',"w", encoding="utf8") as bpl:
for key, value in sorted(all_labels_dic.items()):
bpl.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
count += value
print("Anzahl der vergebenen Labels:", count)
# In[8]:
all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic)
all_labels_reduced_dic = get_label_dic(all_labels_reduced)
print("Klassen mit weniger als 100 Texten:", len(small_classes_all_labels))
print("Klassen insgesammt (reduziert):", len(all_labels_reduced_dic))
# writes the number of blog posts per label to a txt file
# (all_labels reduced to labels with more than 100 blog posts)
with open(folder+'/blogposts_per_all_labels_reduced.txt',"w", encoding="utf8") as bplr:
for key, value in sorted(all_labels_reduced_dic.items()):
bplr.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[9]:
# writes filename, classes, text of all_labels to a csv file
with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding="utf-8") as al_csv:
alw = csv.writer(al_csv, delimiter = ";")
alw.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(identset, all_labels_reduced, textset):
labellist = ", ".join(labels)
textlist = " ".join(texts)
alw.writerow([ident, labellist, textlist])
# In[10]:
# splits all_labels into train and test set
# x = text, y = labels, z = filenames
X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42)
# In[11]:
# saves train and test set of all_labels to csv files
with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding="utf-8") as train_al_csv:
train_al = csv.writer(train_al_csv, delimiter = ";")
train_al.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al):
labellist = ", ".join(labels)
textlist = " ".join(texts)
train_al.writerow([ident, labellist, textlist])
with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding="utf-8") as test_al_csv:
test_al = csv.writer(test_al_csv, delimiter = ";")
test_al.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al):
labellist = ", ".join(labels)
textlist = " ".join(texts)
test_al.writerow([ident, labellist, textlist])
# # Creating the reduced_labels corpus
# In[12]:
# themes
themes_dic = {"Afrique_T" : ["Afrique du nord_T", "Algérie_T", "Afrique noire_T", "Afrique australe_T", "Afrique centrale_T", "Afrique de l'Est_T", "Afrique de l'Ouest_T"],
"Amériques_T" : ["Amérique latine_T", "Brésil_T", "Cône sud_T", "Mexique et Amérique centrale_T", "Pays andins_T", "Canada_T", "États-Unis_T"],
"anthropologie_T" : ["Anthropologie culturelle_T", "Anthropologie politique_T", "Anthropologie religieuse_T", "Anthropologie sociale_T"],
"Asie_T" : ["Asie centrale_T", "Asie du Sud-Est_T", "Extrême Orient_T", "Chine_T", "Japon_T", "Monde indien_T", "Monde persan_T", "Moyen-Orient_T", "Proche-Orient_T"],
"Droit_T" : ["Histoire du droit_T", "Sociologie du droit_T"],
"Économie_T" : ["Développement économique_T", "Économie politique_T", "Gestion_T", "Travail_T", "emploi_T"],
"Éducation_T" : ["Histoire de l'éducation_T", "Sciences de l'éducation_T"],
"Épistémologie et méthodes_T": ["Approches biographiques_T", "Approches de corpus_T", "enquêtes_T", "archives_T", "Archéologie_T", "Cartographie_T", "imagerie_T", "SIG_T", "Digital humanities_T", "Épistémologie_T", "Historiographie_T", "Méthodes de traitement et de représentation_T", "Méthodes qualitatives_T", "Méthodes quantitatives_T", "Sciences auxiliaires de l'Histoire_T", "Vie de la recherche_T"],
"Époque contemporaine_T" : ["Prospectives_T", "XIXe siècle_T", "XXe siècle_T", "1914-1918_T", "1918-1939_T", "1939-1945_T", "1945-1989_T", "1989 à de nos jours_T", "XXIe siècle_T"],
"Époque moderne_T" : ["Révolution française_T", "XVIe siècle_T", "XVIIe siècle_T", "XVIIIe siècle_T"],
"Ethnologie_T" : ["Anthropologie culturelle_T", "Anthropologie politique_T", "Anthropologie religieuse_T", "Anthropologie sociale_T"],
"Études des sciences_T" : ["Histoire des sciences_T", "Philosophie des sciences_T", "Sociologie des sciences_T"],
"Études du politique_T" : ["Guerres_T", "conflits_T", "violence_T", "Génocides et massacres_T", "Histoire politique_T", "Institutions politiques_T", "Mouvements politiques et sociaux_T", "Politiques et actions publiques_T", "Relations internationales_T", "Sciences politiques_T", "Sociologie politique_T"],
"Études urbaines_T" : ["Études urbaines_T"],
"Europe_T" : ["Balkans_T", "Belgique_T", "Europe centrale et orientale_T", "Mondes russes et soviétiques_T", "France_T", "Îles britanniques_T", "Italie_T", "Méditerranée_T", "Monde germanique_T", "Pays baltes et scandinaves_T", "Péninsule ibérique_T", "Suisse_T"],
"Géographie_T" : ["Épistémologie & histoire de la géographie_T", "Espace_T", "société et territoire_T", "Géographie : politique_T", "culture et représentation_T", "Géographie appliquée et aménagement_T", "Géographie rurale_T", "Géographie urbaine_T", "Migrations_T", "immigrations_T", "minorités_T", "Nature_T", "paysage et environnement_T", "Systèmes_T", "modélisation_T", "géostatistiques_T"],
"Histoire_T" : ["Histoire des femmes_T", "Histoire du travail_T", "Histoire économique_T", "Histoire industrielle_T", "Histoire rurale_T", "Histoire sociale_T", "Histoire urbaine_T"],
"Information_T" : ["Édition électronique_T", "Histoire et sociologie de la presse_T", "Histoire et sociologie des médias_T", "Histoire et sociologie du livre_T", "Sciences de l'information_T"],
"Langage_T" : ["Linguistique_T", "Littératures_T"],
"Moyen Âge_T" : ["Bas Moyen Âge_T", "Haut Moyen Âge_T"],
"Océanie_T" : ["Océanie_T"],
"Pensée_T" : ["Histoire intellectuelle_T", "Philosophie_T", "Sciences cognitives_T"],
"Préhistoire et antiquité_T" : ["Égypte ancienne_T", "Histoire grecque_T", "Histoire romaine_T", "Monde oriental_T", "Préhistoire_T"],
"Psychisme_T" : ["Psychanalyse_T", "Psychologie_T"],
"Religions_T" : ["Histoire des religions_T", "Sociologie des religions_T"],
"Représentations_T" : ["Architecture_T", "Études visuelles_T", "Histoire culturelle_T", "Histoire de l'Art_T", "Identités culturelles_T", "Patrimoine_T"],
"Sociologie_T" : ["Âges de la vie_T", "Criminologie_T", "Démographie_T", "Étude des genres_T", "Sociologie de la consommation_T", "Sociologie de la culture_T", "Sociologie de la santé_T", "Sociologie du travail_T", "Sociologie économique_T", "Sociologie urbaine_T", "Sport et loisirs_T"]}
themes_dic = {k.lower(): [i.lower() for i in v] for k, v in themes_dic.items()}
print("THEMES:")
for key, value in themes_dic.items():
print("%s: %s" % (key, value))
print(len(list(themes_dic)))
# In[13]:
disciplines_dic = {"administration publique et développement_D" : ["Relations internationales_D", "Sciences politiques_D", "Administration publique_D"],
"Arts et humanités_D" : ["Architecture_D", "Arts_D", "Études asiatiques_D", "Études anciennes_D", "Études culturelles_D", "Folklore_D", "Humanités pluridisciplinaires_D", "Musique_D", "Philosophie_D", "Religions_D"],
"bibliothéconomie_D" : ["Communication_D", "Sciences de l'information et bibliothéconomie_D"],
"Droit_D" : ["Criminologie_D", "Droit_D"],
"Économie_D" : ["Commerce et affaires_D", "Économie_D", "Finance_D"],
"Éducation_D" : ["Éducation et sciences de l'éducation_D", "Éducation : disciplines scientifiques_D", "Éducation spécialisée_D"],
"Études environnementales_D" : ["Études environnementales_D", "Géographie_D", "Études urbaines_D"],
"géographie et développement_D" : ["Études environnementales_D", "Géographie_D", "Études urbaines_D"],
"Histoire et archéologie_D" : ["Archéologie_D", "Histoire_D", "Histoire et philosophie des sciences_D", "Histoire des sciences sociales_D"],
"Langue et linguistique_D" : ["Linguistique appliquée_D", "Théorie du langage et linguistique_D", "Langue et linguistique_D"],
"Littérature_D" : ["Études littéraires_D", "Théorie et critique littéraires_D", "Littérature britannique_D", "Littérature romane_D", "Littérature_D"],
"Management et administration_D" : ["Ergonomie_D", "Travail et relations professionnelles_D", "Planification et développement_D", "Transports_D", "Management et administration_D"],
"Pluridisciplinarité_D" : ["Sciences sociales interdisciplinaires_D"],
"Psychiatrie_D" : ["Psychiatrie_D"],
"Psychologie_D" : ["Psychologie appliquée_D", "Psychologie biologique_D", "Psychologie clinique_D", "Psychologie du développement_D", "Psychologie éducative_D", "Psychologie expérimentale_D", "Psychologie pluridisciplinaire_D", "Psychanalyse_D", "Psychologie sociale_D"],
"Sciences de la santé et de la santé publique_D" : ["Éthique_D", "Politique et services de santé_D", "Sciences et pratiques des soins_D", "Biomédecine_D", "Toxicomanie_D"],
"Sciences de l'information et de la communication_D" : ["Communication_D", "Sciences de l'information et bibliothéconomie_D"],
"Sciences politiques_D" : ["Relations internationales_D", "Sciences politiques_D", "Administration publique_D"],
"Sociologie et anthropologie_D" : ["Anthropologie_D", "Études régionales_D", "Sociologie_D", "Études féministes_D"],
"Travail social et politique sociale_D" : ["Études des relations interethniques_D", "Études sur la famille_D", "Questions sociales_D", "Travail social_D"]}
disciplines_dic = {k.lower(): [i.lower() for i in v] for k, v in disciplines_dic.items()}
print("DISCIPLINES:")
for key, value in disciplines_dic.items():
print("%s: %s" % (key, value))
print(len(list(disciplines_dic)))
# In[14]:
# reduce labels to highest level
def reduce_labels(y):
"""Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe"""
labels = [] # new y
themes = []
disciplines = []
for i, elements in enumerate(y):
tmp_all_labels = []
tmp_themes = []
tmp_disciplines = []
#print("\nlabels in y an der Stelle %s: %s" % (i, elements))
for element in elements:
#print("\nLabel:", element)
# themes
for key, value in themes_dic.items():
if element == key:
tmp_all_labels.append(element)
tmp_themes.append(element)
#print("\nTheme key:", element)
elif element in value:
tmp_all_labels.append(key)
tmp_themes.append(key)
#print("\nTheme:", key)
                else:
                    pass  # element is not filed under this theme key
            # disciplines
for key, value in disciplines_dic.items():
if element == key:
tmp_all_labels.append(element)
tmp_disciplines.append(element)
#print("\nDiscipline key:", element)
elif element in value:
tmp_all_labels.append(key)
tmp_disciplines.append(key)
#print("\nDiscipline:", key)
                else:
                    pass  # element is not filed under this discipline key
#print("\ntmp_list:", tmp_all_labels)
labels.append(list(set(tmp_all_labels)))
themes.append(list(set(tmp_themes)))
disciplines.append(list(set(tmp_disciplines)))
#print("\nnew labelset:", labels)
return labels, themes, disciplines
reduced_labels, themes_only, disciplines_only = reduce_labels(labelset)
# In[15]:
print(reduced_labels[1000])
print(themes_only[1000])
print(disciplines_only[1000])
# In[16]:
# reduced_labels dic
reduced_labels_dic = get_label_dic(reduced_labels)
print("Auf höchste Hierarchieebene reduzierte Klassen insgesammt:", len(reduced_labels_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (reduced_labels nicht reduziert)
with open(folder+'/blogposts_per_reduced_labels.txt',"w", encoding="utf8") as rl:
for key, value in sorted(reduced_labels_dic.items()):
rl.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[17]:
# reduced_labels reduced dic (<100)
reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic)
reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced)
print("Klassen mit weniger als 100 Texten:", len(small_classes_reduced_labels))
print("Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):", len(reduced_labels_reduced_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen)
with open(folder+'/blogposts_per_reduced_labels_reduced.txt',"w", encoding="utf8") as rlr:
for key, value in sorted(reduced_labels_reduced_dic.items()):
rlr.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[18]:
# writes filename, classes, text of reduced_labels to a csv file
with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding="utf-8") as rl_csv:
rlw = csv.writer(rl_csv, delimiter = ";")
rlw.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(identset, reduced_labels_reduced, textset):
labellist = ", ".join(labels)
textlist = " ".join(texts)
rlw.writerow([ident, labellist, textlist])
# In[19]:
# splits reduced_labels into train and test set
# x = text, y = labels, z = filenames
X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42)
# In[20]:
# saves train and test set of reduced_labels to csv files
with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding="utf-8") as train_rl_csv:
train_rl = csv.writer(train_rl_csv, delimiter = ";")
train_rl.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl):
labellist = ", ".join(labels)
textlist = " ".join(texts)
train_rl.writerow([ident, labellist, textlist])
with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding="utf-8") as test_rl_csv:
test_rl = csv.writer(test_rl_csv, delimiter = ";")
test_rl.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl):
labellist = ", ".join(labels)
textlist = " ".join(texts)
test_rl.writerow([ident, labellist, textlist])
# # Creating the themes_only corpus
# In[21]:
# themes_only dic
themes_only_dic = get_label_dic(themes_only)
print("Auf höchste Hierarchieebene reduzierte Themen:", len(themes_only_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (themes_only nicht reduziert)
with open(folder+'/blogposts_per_theme.txt',"w", encoding="utf8") as to:
for key, value in sorted(themes_only_dic.items()):
to.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[22]:
# themes_only dic reduced (<100)
themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic)
themes_only_reduced_dic = get_label_dic(themes_only_reduced)
print("Klassen mit weniger als 100 Texten:", len(small_classes_themes_only))
print("Auf höchste Hierarchieebene reduzierte Themen (reduziert):", len(themes_only_reduced_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen)
with open(folder+'/blogposts_per_themes_only_reduced.txt',"w", encoding="utf8") as tor:
for key, value in sorted(themes_only_reduced_dic.items()):
tor.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[23]:
# writes filename, classes, text of themes_only to a csv file
with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding="utf-8") as to_csv:
tow = csv.writer(to_csv, delimiter = ";")
tow.writerow(["filename", "classes", "text"])
    for ident, labels, texts in zip(identset, themes_only, textset):
        labellist = ", ".join(labels)
        textlist = " ".join(texts)
        tow.writerow([ident, labellist, textlist])
# In[24]:
# splits themes_only into train and test set
# x = text, y = labels, z = filenames
X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42)
# In[25]:
# saves train and test set of themes_only to csv files
with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding="utf-8") as train_to_csv:
train_to = csv.writer(train_to_csv, delimiter = ";")
train_to.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to):
labellist = ", ".join(labels)
textlist = " ".join(texts)
train_to.writerow([ident, labellist, textlist])
with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding="utf-8") as test_to_csv:
test_to = csv.writer(test_to_csv, delimiter = ";")
test_to.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to):
labellist = ", ".join(labels)
textlist = " ".join(texts)
test_to.writerow([ident, labellist, textlist])
# # Creating the disciplines_only corpus
# In[26]:
# disciplines_only dic
disciplines_only_dic = get_label_dic(disciplines_only)
print("Auf höchste Hierarchieebene reduzierte Klassen insgesammt:", len(disciplines_only_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (disciplines_only nicht reduziert)
with open(folder+'/blogposts_per_discipline.txt',"w", encoding="utf8") as do:
for key, value in sorted(disciplines_only_dic.items()):
do.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[27]:
# disciplines_only dic reduced (<100)
disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic)
disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced)
print("Klassen mit weniger als 100 Texten:", len(small_classes_disciplines_only))
print("Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):", len(disciplines_only_reduced_dic))
# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei
# (disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen)
with open(folder+'/blogposts_per_disciplines_only_reduced.txt',"w", encoding="utf8") as dor:
for key, value in sorted(disciplines_only_reduced_dic.items()):
dor.write("%s : %s\n" % (key, value))
print("%s: %s" % (key, value))
# In[28]:
# writes filename, classes, text of disciplines_only to a csv file
with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding="utf-8") as do_csv:
dow = csv.writer(do_csv, delimiter = ";")
dow.writerow(["filename", "classes", "text"])
    for ident, labels, texts in zip(identset, disciplines_only, textset):
        labellist = ", ".join(labels)
        textlist = " ".join(texts)
        dow.writerow([ident, labellist, textlist])
# In[29]:
# No disciplines were selected for the blog of the Archive of the Archdiocese
# of Salzburg (aes), so this blog is removed from disciplines_only
def delete_blog(identset, labelset, textset):
idents = []
labels = []
texts = []
for ident, label, text in zip(identset, labelset, textset):
if ident.startswith('aes_'):
continue
else:
idents.append(ident)
labels.append(label)
texts.append(text)
return idents, labels, texts
identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset)
# In[30]:
# splits disciplines_only into train and test set
# x = text, y = labels, z = filenames
X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42)
# In[31]:
# saves train and test set of disciplines_only to csv files
with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding="utf-8") as train_do_csv:
train_do = csv.writer(train_do_csv, delimiter = ";")
train_do.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do):
labellist = ", ".join(labels)
textlist = " ".join(texts)
train_do.writerow([ident, labellist, textlist])
with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding="utf-8") as test_do_csv:
test_do = csv.writer(test_do_csv, delimiter = ";")
test_do.writerow(["filename", "classes", "text"])
for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do):
labellist = ", ".join(labels)
textlist = " ".join(texts)
test_do.writerow([ident, labellist, textlist])
# # Visualizations
# In[ ]:
pictures = '../Visualisierungen'
if not os.path.exists(pictures):
os.makedirs(pictures)
# In[32]:
# Histogram: blog posts per all_labels label (better visualized in Excel)
height = list(all_labels_dic.values())
bars = list(all_labels_dic.keys())
y_pos = np.arange(len(bars))
# Create horizontal bars
plt.barh(y_pos, height)
# Create names on the y-axis
plt.yticks(y_pos, bars)
# Save as SVG and PNG (must happen before plt.show(), which clears the figure):
plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg')
plt.savefig(pictures+'/Blogs_all_labels_histogram.png')
# Show graphic
plt.show()
# In[33]:
# Visualization of the all_labels dictionary as a word cloud
# Create a list of words
textliste=str(all_labels_dic.keys())
textliste=textliste.replace(',', '').replace("'", "").replace('"', '').replace("l'", '').split(' ')
blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys']
# removing items while iterating skips elements, so filter with a comprehension
textliste = [element for element in textliste if element not in blacklist]
text = str(textliste).replace("'", "")
# Create the wordcloud object
wordcloud = WordCloud(width=680, height=680, margin=0, background_color="white").generate(text)
# Display the generated image:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
# Save as SVG and PNG (before plt.show(), which clears the figure):
plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.svg', format='svg')
plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.png', format='png')
plt.show()
| 3.15625 | 3 |
test_documents/cleaner.py | RDShah/text-analyzer | 0 | 12794113 | <reponame>RDShah/text-analyzer
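# Cleans graduation_raw.txt: blank lines are dropped, "[...]" spans are
# removed when the brackets close on the same line, and lines containing an
# unclosed "[" are skipped entirely.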
with open('graduation_raw.txt') as rfile:
with open('graduation.txt','w') as wfile:
for line in rfile:
if line == '\n': continue
if '[' not in line:
wfile.write(line)
elif ']' in line:
wfile.write(line[:line.index('[')])
wfile.write(line[line.index(']')+1:])
| 2.78125 | 3 |
setup.py | nagataaaas/Iro | 1 | 12794114 | <reponame>nagataaaas/Iro<gh_stars>1-10
"""
IRO
===
Easy and powerful Colorizer for Python!
Powered by [<NAME>](https://twitter.com/514YJ)
[GitHub](https://github.com/nagataaaas/Iro)
```python
from iro import Iro, Color, Style, ColorRGB, Color256
from colorsys import hls_to_rgb
success = Iro((Color.GREEN, "[ SUCCESS ]"))
error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), "[ ERROR ]"), disable_rgb=False)
warning = Iro((Color.YELLOW, Color256(255, bg=True), "[ WARNING ]"))
deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, "[DEPRECATED]"))
print(success, 'code success.')
print(error, 'code failed!!')
print(warning, 'maybe something wrong.')
print(deprecated, 'this function is deprecated.')
print(Iro([
Color.RED, "Off course, You can nest styles. ",
[
Style.ITALIC,
"This is RED and ITALIC. "
],
[
Color.BLUE,
Color.BG_BRIGHT_YELLOW,
Style.UNDERLINE,
"This is BLUE, BG_YELLOW and UNDERLINED."
],
" Finally back to only RED!"
]))
for h in range(256):
print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='')
```
**output**

# Installation
$ pip install iro
"""
from setuptools import setup
from os import path
about = {}
with open("iro/__about__.py") as f:
exec(f.read(), about)
here = path.abspath(path.dirname(__file__))
setup(name=about["__title__"],
version=about["__version__"],
url=about["__url__"],
license=about["__license__"],
author=about["__author__"],
author_email=about["__author_email__"],
description=about["__description__"],
long_description=__doc__,
long_description_content_type="text/markdown",
install_requires=[],
packages=["iro"],
zip_safe=True,
platforms="any",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Console"
])
| 2.4375 | 2 |
kote/protocol.py | l-n-s/kote | 5 | 12794115 | <gh_stars>1-10
from uuid import UUID, uuid4
MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16 for UUID, 1007 for content
class ValidationError(Exception):
pass
class Message:
AUTHORIZATION = 1
PING = 2
PRIVATE = 3
PUBLIC = 4
OK = 5
UNAUTHORIZED = 6
def __init__(self, code=None, uuid=None, content="", destination=None, \
name=None):
self.code = code
self.uuid = uuid or uuid4()
self.content = content
self.destination = destination
self.name = name
def __bytes__(self):
"""Convert the message to bytes string"""
return bytes([self.code]) + self.uuid.bytes + self.content.encode()
def __repr__(self):
return "Message(code={}, uuid={}, content={}, destination={}, name={})".format(
self.code, str(self.uuid), self.content, self.destination, self.name)
@classmethod
def valid_code(cls, code):
"""Check if code is valid"""
return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC,
cls.OK, cls.UNAUTHORIZED]
@classmethod
def parse(cls, data, destination):
"""Parse binary data and return a message"""
data_length = len(data)
if data_length < 17 or data_length > MAX_MESSAGE_LENGTH:
raise ValidationError("invalid message size: "+str(data_length))
code, uuid, content = int(data[0]), data[1:17], ""
if not cls.valid_code(code):
raise ValidationError("invalid code")
uuid = UUID(bytes=uuid)
if data_length > 17:
try:
content = data[17:].decode()
except UnicodeError:
raise ValidationError("content is not a valid unicode string")
return cls(code=code, uuid=uuid, content=content,
destination=destination)
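

# Minimal round-trip sketch (illustrative; "destination" is whatever opaque
# value the transport layer passes in, so any string works for a local demo):
if __name__ == "__main__":
    msg = Message(code=Message.PRIVATE, content="hello")
    parsed = Message.parse(bytes(msg), destination="peer-1")
    assert parsed.code == Message.PRIVATE
    assert parsed.uuid == msg.uuid
    assert parsed.content == "hello"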
| 2.875 | 3 |
demo/run-adj.py | xtian15/MPAS-SW-TL-AD | 0 | 12794116 | <reponame>xtian15/MPAS-SW-TL-AD
import sys
from datetime import timedelta
import netCDF4 as nc
import numpy as np
sys.path.append('../src/')
from module_sw_mpas import mpas_sw_module as mpsw
from mpas_namelist import namelist
from mpas_sw_driver import read_configs, read_dims, read_vars, \
initial_conditions, clock_namelist
def run_sw_adj():
# ----- nonlinear state trajectory -----
fwdname = 'x1.10242.state.nc'
fwdfile = nc.Dataset(fwdname, 'r')
# ----- end nonlinear state trajectory -----
nml = namelist(nmlname='namelist.sw.x1.10242')
read_configs(nml)
read_dims(nml)
read_vars(nml)
initial_conditions(nml)
mpsw.var_allocation_adj()
mpsw.sw_mpas_init_block()
clock_start, clock_end = clock_namelist(nml)
# ----- adj initial conditions -----
mpsw.u_ad[0, 0, 0] = 1.
mpsw.h_ad[0, 0, 0] = 1.
# ----- end adj initial conditions -----
today = clock_end
ulist, vlist, hlist = [], [], []
unorm = []
itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt)
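    # The adjoint model runs backward in time: start at clock_end and step
    # toward clock_start, reloading the stored forward state at each step.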
while today >= clock_start:
print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum())
mpsw.u[0, 0] = fwdfile['u'][itimestep]
mpsw.h[0, 0] = fwdfile['h'][itimestep]
if (today - clock_start).total_seconds() % nml.output_interval == 0:
ulist.append(mpsw.ureconstructzonal_ad[0].copy())
vlist.append(mpsw.ureconstructmeridional_ad[0].copy())
hlist.append(mpsw.h_ad[0, 0].copy())
unorm.append(mpsw.u_ad[0, 0].copy())
mpsw.sw_rk4_adj()
itimestep -= 1
today -= timedelta(seconds=int(mpsw.config_dt))
ulist, vlist = np.array(ulist), np.array(vlist)
hlist = np.array(hlist)
unorm = np.array(unorm)
r2d = 180. / np.pi
outname = nml.file_output.replace('.nc', '.adj.nc')
with nc.Dataset(outname, 'w') as of:
of.createDimension('nTime', hlist.shape[0])
of.createDimension('nCell', mpsw.latcell.shape[0])
of.createDimension('nEdge', mpsw.latedge.shape[0])
of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d
of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d
of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d
of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d
of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist
of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist
of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist
of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm
if __name__ == '__main__':
run_sw_adj()
| 2.09375 | 2 |
mazure/services/virtualmachines/models.py | tinvaan/mazure | 2 | 12794117 |
import uuid
import mongoengine as db
from flask import current_app as app
from flask_mongoengine import MongoEngine
class Properties:
name = "example"
nic = "example-nic"
disk = "example-disk"
vmId = str(uuid.uuid4())
subId = str(uuid.uuid4())
rgroup = "example-resource-group"
availabilitySet = "example-availability-set"
ppg = "example-proximity-placement-group"
props = Properties()
store = MongoEngine(app._get_current_object())
class VirtualMachine(db.Document):
tags = db.DictField()
name = db.StringField(required=True)
location = db.StringField(required=True)
subscription = db.StringField(required=True)
resourceGroup = db.StringField(required=True)
rid = db.StringField(required=True, unique=True)
provisioningState = db.StringField(default='Succeeded')
type = db.StringField(default='Microsoft.Compute/virtualMachines')
properties = db.DictField(default={
"vmId": props.vmId,
"availabilitySet": {
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet"
},
"proximityPlacementGroup": {
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}"
},
"hardwareProfile": {
"vmSize": "Standard_DS3_v2"
},
"storageProfile": {
"imageReference": {
"publisher": "MicrosoftWindowsServer",
"offer": "WindowsServer",
"sku": "2016-Datacenter",
"version": "latest"
},
"osDisk": {
"osType": "Windows",
"name": "myOsDisk",
"createOption": "FromImage",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 30
},
"dataDisks": [
{
"lun": 0,
"name": "myDataDisk0",
"createOption": "Empty",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 30
},
{
"lun": 1,
"name": "myDataDisk1",
"createOption": "Attach",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Premium_LRS",
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}"
},
"diskSizeGB": 100
}
]
},
"userData": "RXhhbXBsZSBVc2VyRGF0YQ==",
"osProfile": {
"computerName": "myVM",
"adminUsername": "admin",
"windowsConfiguration": {
"provisionVMAgent": True,
"enableAutomaticUpdates": False
},
"secrets": []
},
"networkProfile": {
"networkInterfaces": [
{
"id": f"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}"
}
]
},
"diagnosticsProfile": {
"bootDiagnostics": {
"enabled": True,
"storageUri": f"http://{props.name}.blob.core.windows.net"
}
},
"extensionsTimeBudget": "PT50M",
"provisioningState": "Succeeded"
})
meta = {'collection': 'virtualmachines'}
def __repr__(self):
return "VirtualMachine(%s)" % self.rid
def save(self, *args, **kwargs):
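        # Derive the Azure-style resource id from the document fields before saving.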
self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % (
self.subscription, self.resourceGroup, self.type, self.name)
        super().save(*args, **kwargs)
| 2.015625 | 2 |
graduated_site/migrations/0007_auto_20191218_1215.py | vbacaksiz/KTU-MEBSIS | 0 | 12794118 | <filename>graduated_site/migrations/0007_auto_20191218_1215.py
# Generated by Django 3.0 on 2019-12-18 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('graduated_site', '0006_auto_20191218_0927'),
]
operations = [
migrations.AddField(
model_name='user_internship_post',
name='image',
field=models.ImageField(null=True, upload_to='', verbose_name='Resim'),
),
migrations.AlterField(
model_name='user_internship_post',
name='working_area',
field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area', verbose_name='Çalışma Alanları'),
),
]
| 1.515625 | 2 |
SendGridEmail/sendGridEmailHelper.py | prodProject/WorkkerAndConsumerServer | 0 | 12794119 | from sendgrid.helpers.mail import Mail
from CommonCode.strings import Strings
class SendGridEmailHelper:
def builderToMail(self,emailBuilder):
        fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId)
toids = list()
for ids in emailBuilder.toId:
toids.append(Strings.getFormattedEmail(builder=ids))
subject = emailBuilder.subject
content = emailBuilder.content
return Mail(from_email=fromId,
to_emails=toids,
subject=subject,
html_content=content)
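
# Usage sketch (hypothetical builder object; anything exposing fromId, toId,
# subject and content in the shape consumed above would work):
#
#     mail = SendGridEmailHelper().builderToMail(email_builder)
#     sendgrid.SendGridAPIClient(api_key).send(mail)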
| 2.640625 | 3 |
tests/request_tests.py | yoshikiohshima/gato | 0 | 12794120 | <reponame>yoshikiohshima/gato<gh_stars>0
import unittest2
from sphero import request
from nose.tools import assert_equal
class RequestTest(unittest2.TestCase):
def test_ping(self):
assert_equal('\xff\xff\x00\x01\x00\x01\xfd', request.Ping().bytes)
def test_set_rgb(self):
response = request.SetRGB(0, 0, 100, 200, 0)
assert_equal('\x00d\xC8\x00', response.packet_body())
| 2.453125 | 2 |
lib/helpers/python/aoc/util/fmath.py | josephroquedev/advent-of-code | 0 | 12794121 | <reponame>josephroquedev/advent-of-code
from functools import reduce
import re
# Usage:
# n = [3, 5, 7]
# a = [2, 3, 2]
# chinese_remainder(n, a) == 23
# https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6
def chinese_remainder(n, a):
sum = 0
prod = reduce(lambda a, b: a * b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * mul_inv(p, n_i) * p
return sum % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1:
return 1
while a > 1:
q = a // b
a, b = b, a % b
x0, x1 = x1 - q * x0, x0
if x1 < 0:
x1 += b0
return x1
def numbers_from(l):
regex = r"-?\d+"
return [int(match) for match in re.findall(regex, l)]
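

# Quick sanity checks (illustrative; the first mirrors the usage example in
# the comment above chinese_remainder):
if __name__ == "__main__":
    assert chinese_remainder([3, 5, 7], [2, 3, 2]) == 23
    assert numbers_from("move x=-4 y=12") == [-4, 12]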
| 3.390625 | 3 |
paths_cli/__init__.py | dwhswenson/openpathsampling-cli | 1 | 12794122 | <filename>paths_cli/__init__.py<gh_stars>1-10
from .cli import OpenPathSamplingCLI
from . import commands
from . import version
| 1.382813 | 1 |
MapApi/models.py | todor943/mapEngine | 0 | 12794123 | <reponame>todor943/mapEngine
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from MapApi import signals
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(signals.user_login, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# Create your models here.
| 2.015625 | 2 |
utils/trello_parser.py | James-Ansley/TrelloCardSortParser | 1 | 12794124 | import json
import os
from typing import TextIO, Hashable, Iterator
from dateutil.parser import isoparse
from utils.sorts import Sort, Group
def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort:
"""
Extracts the information from a trello board json file.
A card_mapping maps the card prompts to an ID which is usually more
useful for analysis. Card prompts will be mapped to the given ID when
parsing and used in place of the card prompt.
:param f: a TextIO Stream of the trello board json file
:param card_mapping: a mapping of card names to card ids
:return: a Sort object
"""
data = json.load(f)
trello_lists = data['lists']
trello_lists.sort(key=lambda x: x['pos'])
# Cards are linked to their lists by list ID. So, a temporary mapping
# from list IDs to groups is needed.
groups_by_id = {}
for trello_list in trello_lists:
group_name = trello_list['name']
list_id = trello_list['id']
group = Group(group_name)
groups_by_id[list_id] = group
cards = data['cards']
# Participants may accidentally add cards which are then deleted, "closed".
cards = [card for card in cards if not card['closed']]
for card in cards:
group_id = card['idList']
group = groups_by_id[group_id]
# It may be more useful to map card prompts to an ID for analysis
if card_mapping is not None:
card_data = card_mapping[card['name']]
else:
card_data = card['name']
group.cards.add(card_data)
actions = data['actions']
actions.sort(key=lambda x: isoparse(x['date']))
# Only card moves, list creation, and list renaming are considered.
valid_actions = []
for action in actions:
action_data = action['data']
action_type = action['type']
# Card is moved
if action_type == 'updateCard' and 'listBefore' in action_data:
valid_actions.append(action)
# List is created
elif action_type == 'createList':
valid_actions.append(action)
# List is renamed
elif action_type == 'updateList' and 'name' in action_data['old']:
valid_actions.append(action)
# For the purposes of this study, sorts were considered to start when the
# first trello list was created. Sorts were considered to end when the
# last card move or list rename action was performed.
first_list = next(action for action in valid_actions
if action['type'] == 'createList')
start_time = isoparse(first_list['date'])
end_time = isoparse(actions[-1]['date'])
total_sort_time = end_time - start_time
# Empty groups are discarded.
groups = [group for group in groups_by_id.values() if group.cards]
sort_name = data['name']
    if card_mapping is not None:
        cards = set(card_mapping.values())
    else:
        cards = {card['name'] for card in cards}
sort = Sort(sort_name, groups, cards, total_sort_time)
return sort
def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]:
"""
Returns a list of paths to json files in the given directory. Nested
directories are not traversed.
:param path: a path to a directory
:return: the list of paths to json files in the given directory
"""
files = os.listdir(path)
for file in files:
file_path = os.path.join(path, file)
if os.path.isfile(file_path) and file.endswith('.json'):
yield file_path
def parse_sorts_in_dir(path: str,
card_mapping: dict[str, Hashable] = None) -> list[Sort]:
"""
Parses all sorts in the given directory. Nested directories are not
traversed. This is equivalent to calling parse_sort on each json file in
the given directory.
:param path: a path to a directory
:param card_mapping: an optional mapping of card names to card ids
:return: a list of Sort objects
"""
sorts = []
trello_json_paths = get_paths_to_jsons_in_dir(path)
for path in trello_json_paths:
with open(path, 'r') as f:
sort = parse_board(f, card_mapping)
sorts.append(sort)
return sorts
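

# Example usage (paths and the prompt-to-ID mapping are hypothetical):
#
#     mapping = {"What is a variable?": "Q1", "What is a loop?": "Q2"}
#     for sort in parse_sorts_in_dir("exports/participant_boards", mapping):
#         print(sort)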
| 3.34375 | 3 |
src/orca/operators.py | cournape/orca-py | 0 | 12794125 | import abc
import logging
import math
import random
from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent
from orca.ports import InputPort, OutputPort
logger = logging.getLogger(__name__)
OUTPUT_PORT_NAME = "output"
class IOperator(abc.ABC):
def __init__(
self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False
):
self.x = x
self.y = y
self.name = name
self.description = description
self.ports = {}
self._grid = grid
self.is_passive = is_passive
self.do_draw = is_passive
self.glyph = glyph.upper() if is_passive else glyph
@abc.abstractmethod
def operation(self, frame, force=False):
"""Run the operator for the given frame and return the payload.
This may modify the grid.
Note: the frame is assumed to match the state of the grid given at
construction time."""
def __str__(self):
return self.name
def run(self, frame, force=False):
payload = self.operation(frame, force)
for port in self.ports.values():
if isinstance(port, OutputPort) and port.is_bang:
continue
logger.debug(
"Ops %s (%d, %d): locking port @ %d, %d",
self.name,
self.x,
self.y,
port.x,
port.y,
)
self._grid.lock(port.x, port.y)
output_port = self._output_port
if output_port:
if output_port.is_bang:
self._bang(payload)
else:
self._output(payload)
def erase(self):
self._grid.poke(self.x, self.y, DOT_GLYPH)
def explode(self):
self._grid.poke(self.x, self.y, BANG_GLYPH)
def has_neighbor(self, glyph):
for x, y in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if self._grid.peek(self.x + x, self.y + y) == glyph:
return True
return False
def move(self, offset_x, offset_y):
new_x = self.x + offset_x
new_y = self.y + offset_y
if not self._grid.is_inside(new_x, new_y):
self.explode()
return
collider = self._grid.peek(new_x, new_y)
if collider not in (BANG_GLYPH, DOT_GLYPH):
self.explode()
return
self.erase()
self.x += offset_x
self.y += offset_y
self._grid.poke(self.x, self.y, self.glyph)
if self._grid.is_inside(self.x, self.y):
self._grid.lock(self.x, self.y)
@property
def _output_port(self):
return self.ports.get(OUTPUT_PORT_NAME)
def _has_output_port(self):
return OUTPUT_PORT_NAME in self.ports
def _should_upper_case(self):
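        # Upper-case the output only when the glyph directly right of the
        # operator is an upper-case letter; caseless glyphs (digits, symbols)
        # and lower-case letters leave the output as-is.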
output_port = self._output_port
if output_port is None or not output_port.is_sensitive:
return False
else:
right_port = InputPort(self.x + 1, self.y)
value = self._grid.listen(right_port)
if value.lower() == value.upper() or value.upper() != value:
return False
else:
return True
def _bang(self, payload):
output_port = self._output_port
if output_port is None:
logger.warn("Trying to bang, but no output port.")
return
else:
glyph = BANG_GLYPH if payload else DOT_GLYPH
self._grid.poke(output_port.x, output_port.y, glyph)
def _output(self, glyph, port=None):
if port is None:
output_port = self._output_port
else:
output_port = port
if output_port is None:
            logger.warning(
                "No output port for operator %s @ (%d, %d)", self.name, self.x, self.y
            )
elif glyph is None:
return
else:
if self._should_upper_case():
value = glyph.upper()
else:
value = glyph
self._grid.poke(output_port.x, output_port.y, value)
class Add(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid, x, y, "add", "Output sum of inputs", glyph="a", is_passive=is_passive
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
index = self._grid.listen_as_value(
self.ports["a"]
) + self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(index)
class Substract(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"substract",
"Output difference of inputs",
glyph="b",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen_as_value(self.ports["a"])
b = self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(abs(b - a))
class Clock(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"clock",
"Outputs modulo of frame",
glyph="c",
is_passive=is_passive,
)
self.ports.update(
{
"rate": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
"mod": InputPort(x + 1, y, default="8"),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
rate = self._grid.listen_as_value(self.ports["rate"])
mod = self._grid.listen_as_value(self.ports["mod"])
value = math.floor(frame / rate) % mod
return self._grid.key_of(value)
class Delay(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"delay",
"Bangs on module of frame",
glyph="d",
is_passive=is_passive,
)
self.ports.update(
{
"rate": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
"mod": InputPort(x + 1, y, default="8"),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),
}
)
def operation(self, frame, force=False):
rate = self._grid.listen_as_value(self.ports["rate"])
mod = self._grid.listen_as_value(self.ports["mod"])
value = frame % (mod * rate)
return value == 0 or mod == 1
class East(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"east",
"Move eastwards or bang",
glyph="e",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(1, 0)
self.is_passive = False
class Generator(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"generator",
"Write operands with offset",
glyph="g",
is_passive=is_passive,
)
self.ports.update(
{
"x": InputPort(x - 3, y),
"y": InputPort(x - 2, y),
"len": InputPort(x - 1, y, clamp=lambda x: max(x, 1)),
}
)
def operation(self, frame, force=False):
length = self._grid.listen_as_value(self.ports["len"])
x = self._grid.listen_as_value(self.ports["x"])
y = self._grid.listen_as_value(self.ports["y"]) + 1
for offset in range(length):
input_port = InputPort(self.x + offset + 1, self.y)
output_port = OutputPort(self.x + x + offset, self.y + y)
self.ports.update(
{
f"input{offset}": input_port,
f"output{offset}": output_port,
}
)
res = self._grid.listen(input_port)
self._output(res, output_port)
class Halt(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"half",
"Halts southward operator",
glyph="h",
is_passive=is_passive,
)
def operation(self, frame, force=False):
self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y)
class If(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"if",
"Bang if inputs are equal",
glyph="f",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen(self.ports["a"])
b = self._grid.listen(self.ports["b"])
return a == b
class Increment(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"increment",
"Increment operator southward",
glyph="i",
is_passive=is_passive,
)
self.ports.update(
{
"step": InputPort(x - 1, y, default="1"),
"mod": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
step = self._grid.listen_as_value(self.ports["step"])
mod = self._grid.listen_as_value(self.ports["mod"])
out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME])
return self._grid.key_of((out + step) % (mod if mod > 0 else 36))
class Jumper(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"j",
"Outputs northward operator",
glyph="f",
is_passive=is_passive,
)
self.ports.update(
{
"val": InputPort(x, y - 1),
OUTPUT_PORT_NAME: OutputPort(x, y + 1),
}
)
def operation(self, frame, force=False):
self._grid.lock(self._output_port.x, self._output_port.y)
return self._grid.listen(self.ports["val"])
class Multiply(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"multiply",
"Output multiplication of inputs",
glyph="m",
is_passive=is_passive,
)
self.ports.update(
{
"a": InputPort(x - 1, y),
"b": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
a = self._grid.listen_as_value(self.ports["a"])
b = self._grid.listen_as_value(self.ports["b"])
return self._grid.key_of(a * b)
class North(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"north",
"Move northward or bang",
glyph="n",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(0, -1)
self.is_passive = False
class Random(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"random",
"Outputs random value",
glyph="r",
is_passive=is_passive,
)
self.ports.update(
{
"min": InputPort(x - 1, y),
"max": InputPort(x + 1, y),
OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),
}
)
def operation(self, frame, force=False):
low = self._grid.listen_as_value(self.ports["min"])
high = self._grid.listen_as_value(self.ports["max"])
value = random.randint(low, high)
return self._grid.key_of(value)
class South(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"south",
"Move southward or bang",
glyph="s",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(0, 1)
self.is_passive = False
class Track(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"track",
"Reads eastward operand",
glyph="t",
is_passive=is_passive,
)
self.ports.update(
{
"key": InputPort(x - 2, y),
"len": InputPort(x - 1, y, clamp=lambda x: max(1, x)),
OUTPUT_PORT_NAME: OutputPort(x, y + 1),
}
)
def operation(self, frame, force=False):
key = self._grid.listen_as_value(self.ports["key"])
length = self._grid.listen_as_value(self.ports["len"])
for offset in range(length):
self._grid.lock(self.x + offset + 1, self.y)
port = InputPort(self.x + 1 + key % length, self.y)
return self._grid.listen(port)
class West(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"west",
"Move westward or bang",
glyph="w",
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.move(-1, 0)
self.is_passive = False
class Jymper(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"y",
"Outputs westward operator",
glyph="y",
is_passive=is_passive,
)
self.ports.update(
{
"val": InputPort(x - 1, y),
OUTPUT_PORT_NAME: OutputPort(x + 1, y),
}
)
def operation(self, frame, force=False):
self._grid.lock(self._output_port.x, self._output_port.y)
return self._grid.listen(self.ports["val"])
class Bang(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"bang",
"Bangs neighboring operands",
glyph=BANG_GLYPH,
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self.do_draw = False
self.erase()
class Comment(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"comment",
"Halts line",
glyph=COMMENT_GLYPH,
is_passive=is_passive,
)
self.do_draw = False
def operation(self, frame, force=False):
self._grid.lock(self.x, self.y)
for x in range(self.x + 1, self._grid.cols):
self._grid.lock(x, self.y)
if self._grid.peek(x, self.y) == self.glyph:
break
_NOTES_VALUES = ("C", "c", "D", "d", "E", "F", "f", "G", "g", "A", "a", "B")
NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)}
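# Sharps are written as lower-case letters, e.g. NOTE_TO_INDEX["C"] == 0 and
# NOTE_TO_INDEX["c"] == 1 (C#).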
class Midi(IOperator):
def __init__(self, grid, x, y, *, is_passive=False):
super().__init__(
grid,
x,
y,
"midi",
"Send MIDI note",
glyph=":",
is_passive=True,
)
self.ports.update(
{
"channel": InputPort(self.x + 1, self.y),
"octave": InputPort(
self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8)
),
"note": InputPort(self.x + 3, self.y),
"velocity": InputPort(
self.x + 4, self.y, default="f", clamp=lambda x: min(max(0, x), 16)
),
"length": InputPort(
self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32)
),
}
)
def operation(self, frame, force=False):
if not self.has_neighbor(BANG_GLYPH) and not force:
return
for port_name in "channel", "octave", "note":
if self._grid.listen(self.ports[port_name]) == DOT_GLYPH:
return
note = self._grid.listen(self.ports["note"])
if not NOTE_TO_INDEX:
return
channel = self._grid.listen_as_value(self.ports["channel"])
if channel > 15:
return
octave = self._grid.listen_as_value(self.ports["octave"])
velocity = self._grid.listen_as_value(self.ports["velocity"])
length = self._grid.listen_as_value(self.ports["length"])
self._grid.push_midi(MidiNoteOnEvent(channel, octave, note, velocity, length))
| 2.828125 | 3 |
108.py | Aarush4907/C107 | 0 | 12794126 | <filename>108.py
import pandas as pd
import plotly.graph_objects as go
df = pd.read_csv(r"D:\PROGRAMS\PYTHON\C102\108Data.csv")
student_df = df.loc[df["student_id"]=="TRL_123"]
print (student_df.groupby("level")["attempt"].mean())
fig = go.Figure(go.Bar(x = student_df.groupby("level")["attempt"].mean(),y=['level1','level2','level3','level4'],orientation = 'h' ))
fig.show()
| 3.390625 | 3 |
tests/describe/spec/test_utils.py | jeffh/describe | 3 | 12794127 | import sys
from unittest import TestCase
from StringIO import StringIO
from functools import wraps
from types import TracebackType

from mock import Mock, MagicMock, patch
from describe.spec.utils import (tabulate, Benchmark, CallOnce,
getargspec, func_equal, accepts_arg, filter_traceback)
class DescribeFilteredTraceback(TestCase):
@patch('traceback.format_exception')
def it_should_stop_emitting_when_marker_is_found(self, format_exception):
error = MagicMock(spec=Exception)
tb = Mock(spec=TracebackType)
tb.__contains__.return_value = False
target = tb.tb_next.tb_next.tb_next
target.tb_frame.f_globals.__contains__.return_value = True
format_exception.return_value = 'foo'
self.assertEqual(filter_traceback(error, tb), "foo")
format_exception.assert_called_once_with(Exception, error, target)
def it_should_return_traceback_if_its_not_a_traceback_type(self):
tb = 'bar'
self.assertEqual(filter_traceback(Mock(), tb), "bar")
#class DescribeFnReturnsLocals(TestCase):
# def test_it_captures_locals_from_function(self):
# def foo():
# a = 2
# b = 'foo'
# z = {}
#
# fn = returns_locals(foo)
# context = fn()
# del context['sys']
# del context['_________describe_exception']
# self.assertEqual(context, {
# 'a': 2,
# 'b': 'foo',
# 'z': {},
# })
#
# def test_it_captures_locals_from_decorated_function(self):
# def d(name):
# @with_metadata
# def decorator(fn):
# @wraps(fn)
# def wrapper(*args, **kwargs):
# return fn(name, *args, **kwargs)
# return wrapper
# return decorator
#
# @d('lol')
# def foo(name):
# a = 'foo'
# b = 3
#
# func = returns_locals(foo)
# context = func()
# del context['sys']
# del context['_________describe_exception']
# self.assertEqual(context, {
# 'a': 'foo',
# 'b': 3,
# 'name': 'lol',
# })
class DescribeAcceptArgs(TestCase):
def test_it_returns_True_for_function_with_one_arg(self):
def foo(a):
pass
self.assertTrue(accepts_arg(foo))
def test_it_returns_True_for_class_method_with_one_arg(self):
class Foobar(object):
def foo(self, a):
pass
self.assertTrue(accepts_arg(Foobar().foo))
def test_it_returns_False_otherwise(self):
class Foobar(object):
def foo(self):
pass
def foo():
pass
self.assertFalse(accepts_arg(Foobar().foo))
self.assertFalse(accepts_arg(foo))
def test_it_returns_False_when_non_function(self):
self.assertFalse(accepts_arg(None))
class DescribeIntegrationGetArgSpec(TestCase):
def it_returns_argspec_of_functions(self):
fn = lambda a: 0
self.assertEqual(getargspec(fn), (('a',), None, None, None))
def it_returns_argspec_of_class_constructor(self):
class Foo(object):
def __init__(self, f):
pass
self.assertEqual(getargspec(Foo), (('f',), None, None, None))
def it_returns_argspec_of_class_call_magicmethod(self):
class Foo(object):
def __call__(self, f):
pass
self.assertEqual(getargspec(Foo), (('f',), None, None, None))
def it_returns_argspec_of_wrapped_function(self):
fn = wraps(lambda a: 0)
self.assertEqual(getargspec(fn), (('a',), None, None, None))
def it_returns_argspec_of_wrapped_function_with_CallOnce(self):
fn = CallOnce(lambda a: 0)
self.assertEqual(getargspec(fn), (('a',), None, None, None))
class DescribeCallOnce(TestCase):
def test_it_can_call_wrapped_fn_once(self):
m = Mock()
subject = CallOnce(m)
subject()
subject()
subject()
m.assert_called_once_with()
def test_it_does_nothing_for_wrapping_None(self):
subject = CallOnce(None)
subject()
subject()
def test_its_equal_with_None(self):
subject = CallOnce(None)
self.assertEqual(subject, CallOnce(None))
def test_its_equal_with_like_function(self):
subject = CallOnce(lambda:0)
self.assertEqual(subject, CallOnce(lambda:0))
    def test_its_falsiness_if_wrapped_is_not_callable(self):
        subject = CallOnce(object())
        self.assertFalse(bool(subject))
def test_its_truthiness_if_wrapped_is_callable(self):
subject = CallOnce(lambda:0)
self.assertTrue(bool(subject))
def test_it_preserves_function_attributes(self):
m = Mock()
m.__doc__ = 'my fn doc'
m.__name__ = 'my_func'
m.__module__ = 'super.awesome.module'
m.func_name = 'my_fn'
m.func_code = Mock()
subject = CallOnce(m)
self.assertEqual(subject.__doc__, 'my fn doc')
self.assertEqual(subject.__name__, 'my_func')
self.assertEqual(subject.__module__, 'super.awesome.module')
self.assertEqual(subject.func_name, 'my_fn')
self.assertEqual(subject.func_code, m.func_code)
class TestFuncEqual(TestCase):
def test_it_compares_lambda_function_equality(self):
self.assertTrue(func_equal(lambda:0, lambda:0))
self.assertTrue(func_equal(lambda:1, lambda:1))
self.assertFalse(func_equal(lambda:2, lambda:1))
def test_it_compares_function_equality(self):
def add(a, b): return a + b
def new_add(a, b): return a + b
def sub(a, b): return a - b
def sub_const(a): return a - 2
self.assertTrue(func_equal(add, new_add))
self.assertFalse(func_equal(new_add, sub))
self.assertFalse(func_equal(sub, sub_const))
@patch('describe.spec.utils.get_true_function')
def test_it_compares_class_constructors(self, getfn):
getfn.side_effect = lambda o: (o.__init__, None)
class Foo(object):
def __init__(self, bar):
self.bar = bar
class FooBar(object):
def __init__(self, cake):
self.bar = cake
class ABC(object):
def __init__(self, cake, bar):
self.cake = cake
self.bar()
class Cake(object):
def __init__(self, roflcopter, empty):
self.bake = roflcopter
self.assertTrue(func_equal(Foo, FooBar))
self.assertFalse(func_equal(Foo, ABC))
self.assertFalse(func_equal(Foo, Cake))
@patch('describe.spec.utils.get_true_function')
def test_it_compares_callables(self, getfn):
getfn.side_effect = lambda o: (o.__call__, None)
class Foo(object):
def __call__(self, a, b):
return a + b
class FooBar(object):
def __call__(self, a, b):
return a + self.b
class Cake(object):
def __call__(self, c, b):
return c + b
self.assertFalse(func_equal(Foo(), FooBar()))
self.assertTrue(func_equal(Foo(), Cake()))
class TestTabulate(TestCase):
def test_tabulation_of_string(self):
self.assertEqual(tabulate('foo\nbar'), ' foo\n bar')
def test_tabulation_does_not_insert_spaces_between_double_newlines(self):
self.assertEqual(tabulate('\n\nfoo'), '\n\n foo')
def test_tabulation_ignores_first_line(self):
self.assertEqual(tabulate('foo\nbar', ignore_first=True), 'foo\n bar')
def test_tabulation_by_times(self):
self.assertEqual(tabulate('\n\nfoo', times=2), '\n\n foo')
def test_tabulation_by_zero_times(self):
self.assertEqual(tabulate('\n\nfoo', times=0), '\n\nfoo')
# class TestLocalsFromFunction(TestCase):
# def test_extracts_local_functions_with_invocation(self):
# def describe_spec():
# lol = True
# def it_should_read_submethods(): pass
# def before_each(): pass
# def before_all(): pass
# def sample_func(): pass
# def after_each(): pass
# def after_all(): pass
# def it_should_capture_this_method(): pass
# context = locals_from_function(describe_spec)
# methods = [
# 'it_should_read_submethods',
# 'before_each',
# 'before_all',
# 'after_each',
# 'after_all',
# 'it_should_capture_this_method',
# 'sample_func',
# ]
# self.assertEqual(set(context.keys()), set(methods))
# def test_reraises_any_exceptions_thrown(self):
# def describe_spec():
# @does_not_exist
# def it_should_do_stuff(): pass
# with self.assertRaises(NameError):
# locals_from_function(describe_spec)
class TestBenchmark(TestCase):
def test_benchmark(self):
import time
timer = Benchmark()
with timer:
time.sleep(0.1)
self.assertTrue(timer.history[-1] > 0.09)
def test_benchmark_multiple(self):
import time
timer = Benchmark()
with timer: time.sleep(0.02)
with timer: time.sleep(0.02)
with timer: time.sleep(0.02)
with timer: time.sleep(0.02)
with timer: time.sleep(0.02)
self.assertEqual(len(timer.history), 5)
self.assertTrue(timer.total_time > 0.09)
| 2.453125 | 2 |
modules/led_strip/main.py | la-guirlande/modules | 1 | 12794128 | <reponame>la-guirlande/modules
import re
import time
from modules.utils import ghc, color, project
try:
import RPi.GPIO as GPIO
is_gpio = True
except Exception:
is_gpio = False
print('Running module without GPIO')
module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value)
current_color = color.Color(0, 0, 0)
current_loop = []
if is_gpio:
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
pwm_r = GPIO.PWM(24, 100)
pwm_g = GPIO.PWM(27, 100)
pwm_b = GPIO.PWM(10, 100)
pwm_r.start(0)
pwm_g.start(0)
pwm_b.start(0)
@module.listening('color')
def color_listener(data):
global current_loop
current_loop = []
c = color.Color(data['red'], data['green'], data['blue'])
print(' > Event "color" received :', c.to_array())
set_color(c)
@module.listening('loop')
def loop_listener(data):
global current_loop
current_loop = []
if 'loop' in data:
current_loop = load_loop(data['loop'])
print(' > Event "loop" received :', current_loop)
loop()
def load_loop(loop_data):
loop = []
for part in loop_data.split('|'):
if re.match('c\(\d{1,3},\d{1,3},\d{1,3}\)', part):
loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\d+', part))) })
elif re.match('w\(\d+\)', part):
loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\d+', part))) })
elif re.match('t\(\d{1,3},\d{1,3},\d{1,3},\d+\)', part):
loop.append({ 'type': 't', 'data': list(map(int, re.findall('\d+', part))) })
else:
print('Invalid part :', part)
return loop
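# Loop programs are '|'-separated instructions, matching the patterns above:
#   c(r,g,b)     set a color immediately
#   w(ms)        wait for the given number of milliseconds
#   t(r,g,b,ms)  fade from the current color to (r,g,b) over ms milliseconds
# e.g. load_loop('c(255,0,0)|w(500)|t(0,0,255,1000)')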
def loop():
while current_loop and module.connected:
for part in current_loop:
if not current_loop or not module.connected:
break
match part['type']:
case 'c':
set_color(color.Color(part['data'][0], part['data'][1], part['data'][2]))
case 'w':
time.sleep(part['data'][0] / 1000)
            case 't':
                now = time.time() * 1000
                fade_end = now + part['data'][3]  # avoid shadowing builtin next()
                start_color = current_color.copy()
                end_color = color.Color(part['data'][0], part['data'][1], part['data'][2])
                while now < fade_end:
                    if not current_loop or not module.connected:
                        break
                    now = time.time() * 1000
                    # progress runs from 0 to 1 over the transition duration
                    mix = abs(((fade_end - now) / part['data'][3]) - 1)
                    set_color(start_color.mix(end_color, mix))
def set_color(color):
current_color.set_color(color)
# print(color.to_array())
if is_gpio:
pwm_r.ChangeDutyCycle(color.r * (100 / 255))
pwm_g.ChangeDutyCycle(color.g * (100 / 255))
pwm_b.ChangeDutyCycle(color.b * (100 / 255))
module.connect()
module.wait()
if is_gpio:
pwm_r.stop()
pwm_g.stop()
pwm_b.stop()
GPIO.cleanup()
| 2.65625 | 3 |
plugins/test.py | keyboardcrunch/fastapi_route_plugin_framework | 0 | 12794129 | """ example plugin to extend a /test route """
from fastapi import APIRouter
router = APIRouter()
@router.get("/test")
async def tester():
""" test route """
return [{"result": "test"}]
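# The host application is expected to mount this router; a hypothetical
# sketch of how that usually looks with FastAPI:
#   app.include_router(router)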
| 2.046875 | 2 |
taattack/constraints/pre_constraints/pre_constraint.py | linerxliner/ValCAT | 0 | 12794130 | from abc import ABC, abstractmethod
class PreConstraint(ABC):
@abstractmethod
def filter(self, ids, workload):
        raise NotImplementedError()
| 2.390625 | 2 |
lib/update.py | fadushin/namecheap-dns | 1 | 12794131 | #!/usr/bin/env python
#
# Copyright (c) dushin.net
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of dushin.net nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import json
import xml.etree.ElementTree as ElementTree
import http_client
def create_parser():
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"--root",
dest="root",
help="Root directory",
type="string",
)
parser.add_option(
"--force",
dest="force",
action="store_true",
help="Force an update",
)
return parser
def load_config(_config_file) :
import config
return config
def load_updates(dat) :
if not os.path.exists(dat) :
return {}
with open(dat) as f :
return json.loads(f.read())
def write_updates(dat, updates) :
with open(dat, 'w') as f :
f.write(json.dumps(updates))
def get_current_ip() :
client = http_client.HttpClient("ipinfo.io", secure=True)
response = client.get("/json")
if response['status'] != 200 :
raise Exception("Unable to retrieve IP info")
return json.loads(response['body'])['ip']
def xml_tree_to_dict(element) :
ret = {}
for child in element :
ret[child.tag] = child.text
return ret
def xml_string_to_dict(text) :
return xml_tree_to_dict(ElementTree.fromstring(text))
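# The dynamic DNS endpoint answers with a small XML document along the
# lines of (sketch, field names taken from the checks below):
#   <interface-response><IP>1.2.3.4</IP><ErrCount>0</ErrCount>
#     <Done>true</Done></interface-response>
# which xml_string_to_dict flattens into {'IP': ..., 'ErrCount': ..., 'Done': ...}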
def maybe_update_ip(config, updates, ip, force) :
client = http_client.HttpClient("dynamicdns.park-your-domain.com", secure=True)
for host in config.hosts :
if not force and host in updates and updates[host] == ip :
config.logger.info("Host %s has ip %s. Skipping." % (host, ip))
continue
params = {
'host': host,
'domain': config.domain,
'password': <PASSWORD>
}
response = client.get("/update", params=params)
if response['status'] != 200 :
raise Exception("Did not receive 200 on update IP info")
data = xml_string_to_dict(response['body'])
if data['ErrCount'] != '0' or data['IP'] != ip or data['Done'] != 'true' :
raise Exception("Error encountered updating ip %s: %s" % (ip, data))
config.logger.warn("Updated host %s in domain %s with ip %s" % (host, config.domain, ip))
updates[host] = ip
return updates
def main(argv) :
parser = create_parser()
(options, args) = parser.parse_args()
try:
if not options.root:
parser.print_help()
return 1
config_file = os.sep.join([options.root, "etc", "config.py"])
if not os.path.isfile(config_file) :
print("No configuration file found: %s" % config_file)
parser.print_help()
return 1
config = load_config(config_file)
var_dir = os.sep.join([options.root, "var"])
if not os.path.exists(var_dir) :
os.mkdir(var_dir)
dat = os.sep.join([var_dir, "update.dat"])
updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force)
write_updates(dat, updates)
return 0
except Exception as e:
import logging
logging.error(e)
import traceback
traceback.print_exc()
return -1
if __name__ == "__main__" :
sys.exit(main(sys.argv))
| 1.445313 | 1 |
project.py | mrdanielvelez/ReadAloud-StanfordCodeinPlace-FinalProject | 0 | 12794132 | <gh_stars>0
import os, pyttsx3, tkinter as tk
import PyPDF3
from tkinter.font import BOLD
from tkinter import Canvas, filedialog
from tkPDFViewer import tkPDFViewer as pdf
from gtts import gTTS, tts
from playsound import playsound
from tkinter import *
root = tk.Tk()
root.title("Read Aloud: Turn Your PDF Files into Audiobooks [Stanford Code in Place 2021 Python Project by <NAME>]")
root.iconbitmap("ReadAloud_icon.ico")
canvas = tk.Canvas(root, height=800, width=800, bg="#3F5A36", highlightbackground="#3F5A36", highlightthickness=2)
canvas.create_text(400, 45, text="Turn any PDF into an Audiobook", font=("Helvetica", 21, BOLD), justify="center", fill="white")
canvas.pack()
frame = tk.Frame(root, bg="white")
frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)
# Holds string of one PDF file path
FILENAME = []
# PDF Status Indicators in a List of Booleans
PDF_STATUS = [False]
# Prevents duplicate Read Aloud Buttons
READ_ALOUD_STATUS = [False]
def pdf_opened():
    return PDF_STATUS[-1]
# Opens PDF/EPUB file for viewing and displays its name + extension
def add_file():
if not pdf_opened():
for widget in frame.winfo_children():
widget.destroy()
filename = filedialog.askopenfilename(initialdir="/clear", title="Select a File", filetypes=(("PDF files", "*.pdf"), ("All Files", "*.*")))
close_button()
label = tk.Label(frame, text=filename_from_path(filename))
label.pack()
FILENAME.append(filename)
open_pdf(filename)
# Open File Button
open_file = tk.Button(root, text="Open a File", padx=30, pady=5, fg="white", bg="#5C1010", justify="center", command=add_file)
open_file.pack()
# Returns "file name + .extension"
def filename_from_path(file):
file_split = file.split("/")
return file_split[-1]
# Starts Text-to-Speech Process
def generate_tts():
if PDF_STATUS[-1]:
audio_reader = pyttsx3.init()
with open(FILENAME[-1], "rb") as file:
my_pdf = PyPDF3.PdfFileReader(file)
pages = my_pdf.numPages
my_text = ""
for num in range(pages):
page = my_pdf.getPage(num)
my_text += page.extractText()
global audiobook_name
audiobook_name = filename_from_path(FILENAME[-1]).split(".")[0].title() + " Audiobook.mp3"
audio_reader.save_to_file(my_text, audiobook_name)
audio_reader.runAndWait()
popup_msg(f"Successfully generated MP3 file \"{audiobook_name}\"")
# Generates popup window after creating MP3 file
def popup_msg(msg):
popup = tk.Tk()
popup.title("Read Aloud")
popup.iconbitmap("ReadAloud_icon.ico")
popup.geometry("1000x100")
popup.config(bg="darkgreen")
label = tk.Label(popup, text=msg, font="Helvetica 18")
label.pack(side="top", fill="x", pady=10)
B1 = tk.Button(popup, text="OK", command=popup.destroy)
B1.pack(pady=5, padx=5)
popup.mainloop()
# Opens PDF in frame
def open_pdf(file):
pdf_var = pdf.ShowPdf()
# Clears any previous PDF images before loading new file
pdf_var.img_object_li.clear()
set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120, height = 120)
set_pdf.pack()
# Creates read aloud button
if not READ_ALOUD_STATUS[-1]:
read_aloud = tk.Button(root, text="Generate Audiobook", padx=5, pady=12, fg="white", bg="#4B1B5B", justify="center", command=generate_tts)
read_aloud.pack()
READ_ALOUD_STATUS.append(True)
PDF_STATUS.append(True)
def close_button():
close_file = tk.Button(frame, text="Close File", padx=20, pady=7, fg="white", bg="black", command=close)
close_file.pack()
def close():
for widget in frame.winfo_children():
widget.destroy()
FILENAME.clear()
PDF_STATUS.append(False)
root.mainloop()
| 3.25 | 3 |
python/setup.py | ziyu-guo/treelite | 1 | 12794133 | <gh_stars>1-10
# coding: utf-8
"""Setup script"""
from __future__ import print_function
import os
import shutil
import tempfile
from setuptools import setup, Distribution, find_packages
class TemporaryDirectory(object):
"""Context manager for tempfile.mkdtemp()"""
# pylint: disable=R0903
def __enter__(self):
self.name = tempfile.mkdtemp() # pylint: disable=W0201
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name)
class BinaryDistribution(Distribution):
"""Overrides Distribution class to bundle platform-specific binaries"""
# pylint: disable=R0201
def has_ext_modules(self):
"""Has an extension module"""
return True
LIBPATH_PY = os.path.abspath('./treelite/libpath.py')
LIBPATH = {'__file__': LIBPATH_PY}
# pylint: disable=W0122
exec(compile(open(LIBPATH_PY, "rb").read(), LIBPATH_PY, 'exec'),
LIBPATH, LIBPATH)
# Paths for C/C++ libraries
LIB_PATH = LIBPATH['find_lib_path'](basename='treelite')
RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime')
if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')):
raise RuntimeError('Please compile the C++ package first')
# ignore libraries already in python/treelite; only use ones in ../lib
if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'):
del LIB_PATH[0]
del RT_PATH[0]
LIB_BASENAME = os.path.basename(LIB_PATH[0])
LIB_DEST = os.path.join('./treelite', LIB_BASENAME)
RT_BASENAME = os.path.basename(RT_PATH[0])
RT_DEST = os.path.join('./treelite', RT_BASENAME)
# remove stale copies of library
if os.path.exists(LIB_DEST):
os.remove(LIB_DEST)
if os.path.exists(RT_DEST):
os.remove(RT_DEST)
shutil.copy(LIB_PATH[0], LIB_DEST)
shutil.copy(RT_PATH[0], RT_DEST)
# copy treelite.runtime
PY_RT_SRC = '../runtime/native/python/treelite_runtime'
PY_RT_DEST = './treelite/runtime/treelite_runtime'
if os.path.exists(PY_RT_DEST):
shutil.rmtree(PY_RT_DEST)
shutil.copytree(PY_RT_SRC, PY_RT_DEST)
with open('../VERSION', 'r') as f:
VERSION = f.readlines()[0].rstrip('\n')
with open('./treelite/VERSION', 'w') as f2:
print('{}'.format(VERSION), file=f2)
# Create a zipped package containing glue code for deployment
with TemporaryDirectory() as tempdir:
shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime')))
libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib'))
filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST'))
if os.path.exists(libpath): # remove compiled lib
shutil.rmtree(libpath)
if os.path.exists(filelist):
os.remove(filelist)
shutil.make_archive(base_name='./treelite/treelite_runtime',
format='zip',
root_dir=os.path.abspath(tempdir),
base_dir='runtime/')
setup(
name='treelite',
version=VERSION,
description='treelite: toolbox for decision tree deployment',
url='http://treelite.io',
author='DMLC',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
packages=find_packages(),
install_requires=['numpy', 'scipy'],
package_data={
'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip', 'VERSION']
},
distclass=BinaryDistribution
)
| 2.015625 | 2 |
setup.py | purplesky2016/docassemble-VirtualCourtSampleInterviews | 1 | 12794134 | import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
setup(name='docassemble.ALRecipes',
version='0.1.0',
description=('A docassemble extension.'),
long_description='# docassemble.ALRecipes\r\n\r\n## Content\r\nThis repository includes both short examples you can insert directly into\r\nyour own playground, and longer examples that you can discover from its landing page: Quinten please add the link here.\r\n\r\n - Some Playground examples for the Document Assembly Line project.\r\n - Generic docassemble recipe interviews to address a particular need.\r\n \r\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\r\n\r\n## Add examples to your own playground\r\n\r\nEdit the /config, and add the following: \r\n\r\n```yaml\r\nplayground examples:\r\n - docassemble.ALRecipes:data/questions/examples.yml\r\n - docassemble.base:data/questions/example-list.yml \r\n```\r\n\r\n',
long_description_content_type='text/markdown',
author='AssemblyLine',
author_email='<EMAIL>',
license='The MIT License (MIT)',
url='https://docassemble.org',
packages=find_packages(),
namespace_packages=['docassemble'],
install_requires=['mechanize>=0.4.7'],
zip_safe=False,
package_data=find_package_data(where='docassemble/ALRecipes/', package='docassemble.ALRecipes'),
)
| 2.265625 | 2 |
mistos-backend/src/app/api/classes/image.py | Maddonix/mistos_2 | 1 | 12794135 | <reponame>Maddonix/mistos_2
import copy
from os import name
import warnings
import xml
from pathlib import Path
from typing import Any, List, Optional, Set
import numpy as np
from app import crud
from app import fileserver_requests as fsr
from app.api import utils_import, utils_paths, utils_results
from app.api.classes.image_result_layer import (DbImageResultLayer,
IntImageResultLayer)
from app.api.classes.result_measurement import (DbResultMeasurement,
IntResultMeasurement)
from app.api.classes_com import ComImage
from app.api.dependencies import check_sess
from pydantic import BaseModel, constr
from pathlib import Path
class DbImage(BaseModel):
'''
A class to handle database and file storage of Images
Attributes
----------
uid : int
the objects unique identifier
series_index : int
index of image if multiple images were imported in a single file
name : str
the objects name
hint : str = ""
empty string by default. brief description of the object
has_bg_layer: bool = False
indicator if image has an associated background layer.
bg_layer_id: int, optional
None if no associated background layer, otherwise id of the background layer.
path_metadata: pathlib.Path, optional
path to the images metadata ".json". Automatically generated as image is saved to database.
path_image: pathlib.Path, optional
path to the images array ".zarr" folder. Automatically generated as image is saved to database.
image_result_layers: List[DbImageResultLayer] = []
emtpy list by default. List of all associated DbImageResultLayer objects
measurements: List[DbResultMeasurement] = []
emtpy list by default. List of all associated DbResultMeasurement objects
tags: Set[str] = []
set of string keywords to easily categorize objects in frontend.
Methods
-------
to_int_class()->app.api.classes_internal.IntImage:
returns object as int_class. Loads layer array from file path in the process.
to_com_class()->app.api.classes_com.ComImage:
returns object as com_class.
set_bg_false(sess = None)
sets "has_bg_layer" property to False in db.
set_bg_true(layer_uid: int, sess = None)
sets "has_bg_layer" property to True in db. sets bg_layer_id to given value.
create_in_db(sess = None):
creates object in database, updates objects path and uid attributes accordingly. Uses default session if none is passed.
refresh_from_db() -> DbImage
Fetches image from database and returns DbImage object.
update_hint(new_hint: str, sess = None):
updates objects hint in database. Uses default session if none is passed.
update_channel_names(channel_names: List[str])
edits "custom_channel_names" attribute of image in it's metadata.json
delete_from_system(sess = None):
deletes object in database and file storage. Uses default session if none is passed.
'''
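    # Typical lifecycle (sketch): a DbImage is first persisted with
    # create_in_db(), which assigns its uid and storage paths, and is then
    # inflated into an IntImage via to_int_class() for processing.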
uid: int
series_index: int
name: str
hint: str = ""
has_bg_layer: bool = False
bg_layer_id: Optional[int]
path_metadata: Optional[Path]
path_image: Optional[Path]
image_result_layers: List[DbImageResultLayer] = []
measurements: List[DbResultMeasurement] = []
tags: Set[str] = []
def to_int_class(self, for_refresh=False):
'''
Returns object as int class.
Parameters:
- for_refresh(bool = False): If True, image array is not reloaded from file storage.
'''
kwargs = self.dict()
# Only load the full image if not already loaded
if for_refresh == False:
data = fsr.load_zarr(kwargs["path_image"])
kwargs["data"] = data
else:
kwargs["data"] = None
metadata = fsr.load_json(self.path_metadata)
metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path(
self.path_metadata)
kwargs["metadata_omexml"] = fsr.load_metadata_xml(metadata_omexml_path)
del kwargs["path_metadata"]
del kwargs["path_image"]
kwargs["metadata"] = metadata
kwargs["image_result_layers"] = [image_result_layer.to_int_class()
for image_result_layer in self.image_result_layers]
kwargs["result_measurements"] = [measurement.to_int_class()
for measurement in self.measurements]
return IntImage(**kwargs)
def to_com_class(self):
'''
        Returns object as com class.
'''
kwargs = self.dict()
kwargs["metadata"] = utils_import.load_metadata_only(
self.path_metadata)
kwargs["imageResultLayers"] = [image_result_layer.to_com_class()
for image_result_layer in self.image_result_layers]
kwargs["measurements"] = [measurement.to_com_class()
for measurement in self.measurements]
kwargs["seriesIndex"] = self.series_index
kwargs["hasBgLayer"] = self.has_bg_layer
kwargs["bgLayerId"] = self.bg_layer_id
kwargs["tags"] = list(self.tags)
return ComImage(**kwargs)
def set_bg_false(self, sess=None):
'''
Sets imagaes has_bg_layer property to False in database.
Parameters:
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db).
'''
sess = check_sess(sess)
crud.update_image_bg_false(self.uid, sess)
def set_bg_true(self, layer_uid: int, sess=None):
'''
Sets images bg_layer_id property to given value.
Parameters:
- layer_uid(int): uid of result layer to be used as background layer.
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db).
'''
sess = check_sess(sess)
crud.update_image_bg_true(self.uid, layer_uid, sess)
def create_in_db(self, sess=None):
'''
Creates object in db. Paths and id are generated and updated in object.
Parameters:
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db)
'''
sess = check_sess(sess)
sql_image = crud.create_image(self, sess)
self.uid = sql_image.id
self.path_image = Path(sql_image.path_image)
self.path_metadata = Path(sql_image.path_metadata)
def refresh_from_db(self, sess=None):
'''
Refreshes object image from db.
Parameters:
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db)
'''
sess = check_sess(sess)
updated_db_image = crud.read_image_by_uid(
self.uid, sess, for_refresh=True)
return updated_db_image
def update_hint(self, new_hint: str, sess=None):
'''
This function expects a new hint as string and calls crud.update_image_hint to update the image hint.
Parameters:
- new_hint(str): string to be saved.
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db)
'''
sess = check_sess(sess)
crud.update_image_hint(self.uid, new_hint, sess)
def update_channel_names(self, channel_names: List[str]):
'''
This function expects a new channel names as list of strings. opens metadata.json and edits custom_channel_names
Parameters:
- channel_names(List[str]): List of strings to be saved as channel names.
'''
metadata = fsr.load_json(self.path_metadata)
metadata["custom_channel_names"] = channel_names
fsr.save_metadata(metadata, self.path_metadata)
def delete_from_system(self, sess=None):
'''
calls crud.delete_image and passed db_image object to delete all associated files and db entries
Parameters:
- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default session will be used (app.api.dependencies.get_db)
'''
sess = check_sess(sess)
crud.delete_image(self, sess)
class IntImage(BaseModel):
'''
A class to handle calculations and other internal operations with images.
Attributes
----------
uid : int
the objects unique identifier
name : str
the objects name
series_index : int
if multiple images are imported via one file (image series), the index of the image is stored here
metadata : dict
reduced metadata for easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series)
Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata will be saved to .json and loaded.
hint : str, optional
empty string by default. brief description of the object
experiment_ids: List[int]
empty list by default. List of experiments_group ids which use the image.
image_result_layers : List[IntImageResultLayer]
empty list by default. List of all associated IntImageResultLayer objects.
result_measurements : List[IntResultMeasurement]
empty list by default. List of all associated IntResultMeasurement objects
tags : Set[str]:
empty set by default. Set of keywords to work with in the frontend
data : Any ___TO BE DONE: add custom field type___
array of shape (z,c,y,x) in which the image is stored. Is loaded from .zarr files, most numpy operations work, some may cause trouble.
metadata_omexml : Any
original metadata xml data as read by bioformats import when image was imported
has_bg_layer : bool
False by default. Indicates if image as layer selected as background_layer.
bg_layer_id : int, optional
None if no bg_layer selected, otherwise it holds the bg_layer_id
Methods
-------
on_init():
Initializes object. Object is saved in database and file storage
get_thumbnail_path():
Helper function which returns path to the thumbnail on fileserver.
get_image_scaling():
Returns dimensions normalized scales in array with shape (z,y,x) or None.
to_db_class() -> app.api.classes_db.DbImage:
Returns object as DbImage object.
set_bg_false():
Helper function to set has_bg_layer to False and bg_layer_id to None.
set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer):
Method to set layer as background layer
select_channel(channel: int) -> np.array:
Helper method expects channel index. Returns deep copy of channel with shape (z,y,x).
select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None:
Returns layer with corresponding id, returns None if id is not found in self.image_result_layers
calculate_background() -> List:
Returns list of length n_channels. List holds the mean pixel values for each channel if background layer is defined and zeros if no background layer is defined.
measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult:
Returns measurement object for given result layer and saves it to db and file storage.
get_classifiers(clf_type: str) -> dict:
Fetches all saved classifiers from db, filters for type and returns dictionary of format {"UID_NAME": UID}
refresh_from_db():
Fetches data of this image from db and updates the objects attributes.
delete_result_layer(layer_id: int):
Deletes the layer from database, file storage and the image. If layer was background_layer, corresponding attributes are reset.
estimate_ground_truth_layer(layer_id_list: List[int], suffix: str):
Fetches given layers and uses SimpleITKs STAPLE algorithm to estimate ground truth.
Resulting layer will be initialized as IntResultLayer.
add_layer_from_roi(path)
add_layer_from_mask(path)
'''
uid: int
name: str
series_index: int
metadata: dict
hint: Optional[str] = ""
experiment_ids: List[int] = []
image_result_layers: List[IntImageResultLayer] = []
result_measurements: List[IntResultMeasurement] = []
tags: Set[str] = set()
data: Any
metadata_omexml: Any
has_bg_layer: bool = False
bg_layer_id: Optional[int]
def on_init(self):
'''
Method to initialize the object. Handles image as new image if "uid" == -1 and as imported Mistos image if "uid" == -2.
Creates image in database which generates path and id.
'''
if self.uid == -1:
db_image = self.to_db_class()
db_image.create_in_db()
self.uid = db_image.uid
original_filename = self.metadata["original_filename"]
self.metadata = self.metadata["images"][self.series_index]
self.metadata["original_filename"] = original_filename
# save zarr
fsr.save_zarr(self.data, db_image.path_image)
# save metadata dict
fsr.save_json(self.metadata, db_image.path_metadata)
# save metadata xml
path_xml = utils_paths.make_metadata_xml_path_from_json_path(
db_image.path_metadata)
metadata_string = self.metadata_omexml.to_xml(encoding="utf-8")
metadata_string = xml.dom.minidom.parseString(
metadata_string).toprettyxml(indent="\t")
fsr.save_metadata_xml(metadata_string, path_xml)
elif self.uid == -2:
db_image = self.to_db_class()
db_image.create_in_db()
self.uid = db_image.uid
print(f"Importing archived Mistos image with id {self.uid}")
fsr.save_zarr(self.data, db_image.path_image)
fsr.save_json(self.metadata, db_image.path_metadata)
path_xml = utils_paths.make_metadata_xml_path_from_json_path(
db_image.path_metadata)
# metadata_string = self.metadata_omexml.to_xml(encoding="utf-8")
metadata_omexml = self.metadata_omexml.toprettyxml(indent="\t")
fsr.save_metadata_xml(metadata_omexml, path_xml)
# save thumbnail
thumbnail = utils_import.generate_thumbnail(self.data)
thumbnail_path = self.get_thumbnail_path()
fsr.save_thumbnail(thumbnail, thumbnail_path)
def get_thumbnail_path(self):
'''
Helper function which returns path to the thumbnail on fileserver.
        Gets the fileserver path and joins it with the return value from utils_paths.make_thumbnail_path
Returns path as pathlib.Path
'''
return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid))
def get_image_scaling(self):
'''
Reads pixel dimensions and returns relative dimensions.
Returns dimensions normalized scales in array with shape (z,y,x) or None if no scaling information was provided in metadata.
'''
x = self.metadata['pixel_size_physical_x']
y = self.metadata['pixel_size_physical_y']
z = self.metadata['pixel_size_physical_z']
n_z = self.metadata['pixel_size_z']
if n_z > 1:
dims = np.array([z, y, x])
dims = dims/dims.max()
elif n_z == 1:
dims = np.array([y, x])
dims = dims/dims.max()
else:
dims = None
print("Couldn't calculate scaling from metadata, defaulting to None")
return dims
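    # get_image_scaling example (sketch): physical voxel sizes z=2.0,
    # y=0.5, x=0.5 (same unit) normalize to [1.0, 0.25, 0.25] for
    # (z, y, x) display, e.g. in the napari viewer.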
def to_db_class(self):
'''
Transforms internal class representation to db class representation.
'''
db_image_result_layers = [result_layer.to_db_class()
for result_layer in self.image_result_layers]
db_result_measurements = [measurement.to_db_class()
for measurement in self.result_measurements]
db_image = DbImage(
uid=self.uid,
series_index=self.series_index,
name=self.name,
hint=self.hint,
path_metadata=None,
path_image=None,
has_bg_layer=self.has_bg_layer,
bg_layer_id=self.bg_layer_id,
experiment_ids=self.experiment_ids,
image_result_layers=db_image_result_layers,
result_measurements=db_result_measurements,
tags=self.tags
)
return db_image
def set_bg_false(self):
'''
Helper function to set has_bg_layer to False and bg_layer_id to None.
Attribute is changed in db, then object attributes are reloaded from db.
'''
db_image = self.to_db_class()
db_image.set_bg_false()
self.refresh_from_db()
def set_bg_true(self, image_layer: IntImageResultLayer):
'''
Method to set layer as background layer.
Parameters:
- image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer.
'''
layer_uid = image_layer.uid
db_image = self.to_db_class()
db_image.set_bg_true(layer_uid)
self.refresh_from_db()
def select_channel(self, channel: int):
'''
Helper method expects channel index.
Returns deep copy of channel with shape (z,y,x).
Parameters:
- channel(int): index of channel to be selected.
'''
channel_data = copy.deepcopy(self.data[:, channel, ...])[
:, np.newaxis, ...]
return channel_data
    def select_result_layer(self, uid: int):
        layers = [_ for _ in self.image_result_layers if _.uid == uid]
        if len(layers) > 0:
            return layers[0]
        else:
            warnings.warn(
                f"IntImage.select_result_layer could not select layer with id {uid}.\nThis image has the associated layers \n{self.image_result_layers}",
                UserWarning)
            return None
def calculate_background(self):
'''
Expects the bg_uid to belong to a result layer of this image.
Result layer will be turned to binary, assuming all labels > 0 to be background.
Returns list of length n_channel with mean intensity of measured pixels.
'''
if self.has_bg_layer:
bg_uid = self.bg_layer_id
bg_layer = self.select_result_layer(bg_uid)
bg_mask = bg_layer.data
else:
bg_mask = np.zeros((
self.data.shape[0],
self.data.shape[2],
self.data.shape[3]
))
        # Binarize unconditionally, as the docstring describes: every
        # label > 0 counts as background
        bg_mask = np.where(bg_mask > 0, 1, 0)
n_pixel = bg_mask.sum()
n_channel = self.data.shape[1]
mean_pixel = []
for n in range(n_channel):
channel_data = self.select_channel(n)
selection = np.where(bg_mask, channel_data, 0)
_mean = selection.sum()/n_pixel
mean_pixel.append(_mean)
return mean_pixel
def measure_mask_in_image(self, layer_id: int):
'''
Method to measure mask and save result as ResultMeasurement. Creates measurement object and initializes it (save to db and file storage)
Returns IntResultMeasurement object:
measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels)
Parameters:
- layer_id(int): uid of the layer to be measured
'''
image_array = self.data
layer = self.select_result_layer(layer_id)
measurement, measurement_summary = utils_results.calculate_measurement(
image_array, layer.data)
measurement_result = IntResultMeasurement(
uid=-1,
name=utils_paths.make_measurement_name(self.name, layer.name),
hint="",
image_id=self.uid,
result_layer_id=layer.uid,
measurement=measurement,
measurement_summary=measurement_summary
)
measurement_result.on_init()
self.refresh_from_db()
return measurement_result
def get_classifiers(self, clf_type: str):
'''
Loads all classifiers of given type from database.
Returns dictionary of format {"UID_NAME": UID}. Mainly for use in napari viewer.
Parameters:
- clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types.
'''
# Fetches dict in form {name: id}
clf_dict = crud.read_classifier_dict_by_type(clf_type)
if clf_dict == {}:
clf_dict["No classifers found"] = None
return clf_dict
def refresh_from_db(self):
'''
Requests current information from db and updates the object's attributes accordingly.
Does not reload image data again
'''
db_image = self.to_db_class()
updated_info = db_image.refresh_from_db()
self.name = updated_info.name
self.hint = updated_info.hint
self.experiment_ids = updated_info.experiment_ids
self.image_result_layers = updated_info.image_result_layers
self.result_measurements = updated_info.result_measurements
self.tags = updated_info.tags
self.has_bg_layer = updated_info.has_bg_layer
self.bg_layer_id = updated_info.bg_layer_id
def delete_result_layer(self, layer_id: int):
'''
Method to delete a result layer by uid.
If result layer is selected as background layer, the attributes "has_bg_layer" and "bg_layer_id" are set to False and None.
Parameters:
- layer_id(int): Id of result layer to be deleted.
'''
layer = self.select_result_layer(layer_id)
if layer_id == self.bg_layer_id:
self.set_bg_false()
layer.delete()
self.refresh_from_db()
def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None):
'''
Method to estimate ground truth from multiple layers with by SimpleITK's STAPLE probabilities.
For ground truth estimation layer will be binarized, all labels > 0 will be unified and represented as foreground (==1) for calculation.
Saves label layer to image, database and file storage.
Parameters:
- layer_id_list(List[int]): List of layer ids to be used for ground truth estimation. Must belong to this image.
- suffix(str): will be appended to layer name.
'''
if suffix == None:
suffix = ""
else:
suffix = "_" + suffix
label_array_list = [crud.read_result_layer_by_uid(
layer_id).to_int_class().data for layer_id in layer_id_list]
ground_truth_estimation_array = utils_results.staple_gte(
label_array_list)
hint = f"Following Label Layers were used to estimate the ground truth: {layer_id_list}"
int_result_layer = IntImageResultLayer(
uid=-1,
name=f"ground_truth_estimation{suffix}",
hint=hint,
image_id=self.uid,
layer_type="labels",
data=ground_truth_estimation_array
)
int_result_layer.on_init()
self.refresh_from_db()
self.measure_mask_in_image(int_result_layer.uid)
def add_layer_from_roi(self, path: Path):
mask = utils_import.read_roi(path, self.data.shape)
int_result_layer = IntImageResultLayer(
uid=-1,
name=f"{path.name}",
hint="imported maks",
image_id=self.uid,
layer_type="labels",
data=mask
)
int_result_layer.on_init()
self.refresh_from_db()
self.measure_mask_in_image(int_result_layer.uid)
def add_layer_from_mask(self, path: Path):
mask = utils_import.read_mask(path)
        if mask is None:
warnings.warn("Image could not be read!")
return None
else:
image_shape = self.data.shape
if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]):
int_result_layer = IntImageResultLayer(
uid=-1,
name=f"{path.name}",
hint="imported mask",
image_id=self.uid,
layer_type="labels",
data=mask
)
int_result_layer.on_init()
self.refresh_from_db()
self.measure_mask_in_image(int_result_layer.uid)
elif mask.shape == (1, image_shape[-2], image_shape[-1]):
_mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1]))
_mask[:,...] = mask
int_result_layer = IntImageResultLayer(
uid=-1,
name=f"{path.name}",
hint="imported mask",
image_id=self.uid,
layer_type="labels",
data=_mask
)
int_result_layer.on_init()
self.refresh_from_db()
self.measure_mask_in_image(int_result_layer.uid)
else:
warnings.warn(
f"Mask shape {mask.shape} does not match image shape {image_shape}")
| 1.992188 | 2 |
app/rooms/examples/__init__.py | olegliubimov/code-examples-python | 21 | 12794136 | <gh_stars>10-100
from .eg001_create_room_with_data import eg001Rooms
from .eg002_create_room_with_template import eg002
from .eg003_export_data_from_room import eg003
from .eg004_add_forms_to_room import eg004
from .eg005_get_rooms_with_filters import eg005
from .eg006_create_external_form_fill_session import eg006
from .eg007_create_form_group import eg007
from .eg008_grant_office_access_to_form_group import eg008
from .eg009_assign_form_to_form_group import eg009
| 1.125 | 1 |
onlinejudge/implementation/command/login.py | kfaRabi/online-judge-tools | 0 | 12794137 | <gh_stars>0
# Python Version: 3.x
import getpass
import sys
from typing import *
import onlinejudge
import onlinejudge.implementation.logging as log
import onlinejudge.implementation.utils as utils
if TYPE_CHECKING:
import argparse
def login(args: 'argparse.Namespace') -> None:
# get service
service = onlinejudge.dispatch.service_from_url(args.url)
if service is None:
sys.exit(1)
# configure
kwargs = {}
if service.get_name() == 'yukicoder':
if not args.method:
args.method = 'github'
if args.method not in ['github', 'twitter']:
log.failure('login for yukicoder: invalid option: --method %s', args.method)
sys.exit(1)
kwargs['method'] = args.method
else:
if args.method:
log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method)
sys.exit(1)
with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess:
if args.check:
if service.is_logged_in(session=sess):
log.info('You have already signed in.')
else:
log.info('You are not signed in.')
sys.exit(1)
else:
# login
def get_credentials() -> Tuple[str, str]:
if args.username is None:
args.username = input('Username: ')
if args.password is None:
args.password = getpass.getpass()
return args.username, args.password
log.warning('If you don\'t want to give your password to this program, you can give only your session tokens.')
log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md')
service.login(get_credentials, session=sess, **kwargs) # type: ignore
| 2.453125 | 2 |
automatic_model_loading.py | FedericoMolinaChavez/tesis-research | 0 | 12794138 | <gh_stars>0
from keras.models import model_from_json
from keras.models import load_model
model_names = {'clivage_1_json' : 'models/model1.json',
'clivage_1' : 'models/model1.h5',
'clivage_2_json' : 'models/model2.json',
'clivage_2' : 'models/model2.h5',
'clivage_3_json' : 'models/model3.json',
'clivage_3' : 'models/model3.h5',
'clivage_4_json' : 'models/model4.json',
'clivage_4' : 'models/model4.h5',
'clivage_5_json' : 'models/model5.json',
'clivage_5' : 'models/model5.h5',
'clivage_6_json' : 'models/model6.json',
'clivage_6' : 'models/model6.h5',
'clivage_7_json' : 'models/model7.json',
'clivage_7' : 'models/model7.h5',
'clivage_8_json' : 'models/model8.json',
'clivage_8' : 'models/model8.h5',
'clivage_9_json' : 'models/model9.json',
'clivage_9' : 'models/model9.h5'}
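# Entries come in pairs: each '*_json' key holds the architecture file and
# the following key holds the matching '.h5' weights; automatic_loading()
# below relies on this insertion order.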
def automatic_loading():
    keys = list(model_names.keys())
    models = []
    for i in range(0, len(keys), 2):
        with open(model_names[keys[i]], 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights(model_names[keys[i + 1]])
        print("Loaded model from disk")
        models.append(loaded_model)
    return models
#print(automatic_loading()) | 2.375 | 2 |
piwebasync/websockets/__init__.py | newvicx/piwebasync | 0 | 12794139 | <gh_stars>0
from .client import WebsocketClient | 1.085938 | 1 |
src/builder.py | neobepmat/BatchBuilder | 0 | 12794140 | <reponame>neobepmat/BatchBuilder
# Temperature-conversion program using PyQt
import sys, os
from PyQt4 import QtGui, QtCore, uic
from PyQt4.QtGui import QMessageBox
from main import *
from builder_configuration import BuilderConfiguration
form_class = uic.loadUiType("main-interface.ui")[0] # Load the UI
class MyException(Exception):
pass
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.btnStart.clicked.connect(self.validate_builder)
self.isOfficialVersion = False
self.versionNumber = None
self.doBuildTdkDesign = False
self.doBuildOverlord = False
self.doBuildConfiguratore = False
self.doBuildTdk = False
self.doBuildTdkDeviceDriver = False
self.doTdkSetup = False
self.doTdkDeviceDriverSetup = False
self.doBuildTdk_Debug = False
self.doBuildTdkDeviceDriver_Debug = False
self.configurator = BuilderConfiguration()
    def getOfficialVersionType(self):
        self.isOfficialVersion = self.chkOfficialVersion.isChecked()
    def getOperationsList(self):
        self.doBuildTdkDesign = self.chkBuildTdkDesign.isChecked()
        if self.chkBuildOverlord.isChecked():
            # Building Overlord requires the TDK design build as well
            if not self.doBuildTdkDesign:
                self.chkBuildTdkDesign.toggle()
                self.doBuildTdkDesign = True
            self.doBuildOverlord = True
        else:
            self.doBuildOverlord = False
        if self.chkBuildConfiguratore.isChecked():
            # The configurator also requires the TDK design build
            if not self.doBuildTdkDesign:
                self.chkBuildTdkDesign.toggle()
                self.doBuildTdkDesign = True
            self.doBuildConfiguratore = True
        else:
            self.doBuildConfiguratore = False
        self.doBuildTdk = self.chkBuildTdk.isChecked()
        self.doBuildTdkDeviceDriver = self.chkBuildTdkDeviceDriver.isChecked()
        self.doTdkSetup = self.chkDoTdkSetup.isChecked()
        self.doTdkDeviceDriverSetup = self.chkDoTdkDeviceDriverSetup.isChecked()
        self.doBuildTdk_Debug = self.chkDoBuildTdk_Debug.isChecked()
        self.doBuildTdkDeviceDriver_Debug = self.chkDoBuildTdkDeviceDriver_Debug.isChecked()
def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked):
try:
if not os.path.exists(FTCMfolderToBeChecked):
print('Directory %s does not exist. It will be created.'% (FTCMfolderToBeChecked))
os.makedirs(FTCMfolderToBeChecked)
print('Directory %s has been created.'% (FTCMfolderToBeChecked))
if not os.path.exists(TDKfolderToBeChecked):
print('Directory %s does not exist. It will be created.'% (TDKfolderToBeChecked))
os.makedirs(TDKfolderToBeChecked)
print('Directory %s has been created.'% (TDKfolderToBeChecked))
return True
except MyException as e:
            QMessageBox.about(self, "CheckDirectoryExistence - Exception", str(e))
return False
def resumeOperations(self):
if not self.edtVersion.text():
raise MyException('Version number not specified!')
if self.isOfficialVersion:
FTCMfolderToBeChecked = self.configurator.OfficialExePath + "\\" + self.edtVersion.text()
TDKfolderToBeChecked = self.configurator.OfficialTdkPath + "\\" + self.edtVersion.text()
else:
FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + "\\" + self.edtVersion.text()
TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + "\\" + self.edtVersion.text()
if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked):
raise MyException('Error while creating folders for FTCM and TDK versions!')
self.versionNumber = self.edtVersion.text()
operationsLog = ""
self.getOfficialVersionType()
self.getOperationsList()
if self.isOfficialVersion:
operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']\n\n'
else:
operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\n\n'
if self.doBuildTdkDesign:
operationsLog += "Building TDK in DESIGN: YES\n\n"
else:
operationsLog += "Building TDKin DESIGN: NO\n\n"
if self.doBuildOverlord:
operationsLog += "Building OVERLORD: YES\n"
else:
operationsLog += "Building OVERLORD: NO\n"
if self.doBuildConfiguratore:
operationsLog += "Building CONFIGURATORE OVERLORD: YES\n\n"
else:
operationsLog += "Building CONFIGURATORE OVERLORD: NO\n\n"
if self.doBuildTdk:
operationsLog += "Building TDK: YES\n"
else:
operationsLog += "Building TDK: NO\n"
if self.doBuildTdk_Debug:
operationsLog += "Building TDK in DEBUG: YES\n\n"
else:
operationsLog += "Building TDK in DEBUG: NO\n\n"
if self.doBuildTdkDeviceDriver:
operationsLog += "Building TDK DEVICE DRIVER: YES\n"
else:
operationsLog += "Building TDK DEVICE DRIVER: NO\n"
if self.doBuildTdkDeviceDriver_Debug:
operationsLog += "Building TDK DEVICE DRIVER in DEBUG: YES\n\n"
else:
operationsLog += "Building TDK DEVICE DRIVER in DEBUG: NO\n\n"
if self.doTdkSetup:
operationsLog += "Building OVERLORD SETUP: YES\n\n"
else:
operationsLog += "Building OVERLORD SETUP: NO\n\n"
if self.doTdkDeviceDriverSetup:
operationsLog += "Building TDK DEVICE DRIVER SETUP: YES\n\n"
else:
operationsLog += "Building TDK DEVICE DRIVER SETUP: NO\n\n"
operationsLog += "Would you like to continue using these parameters?"
print(operationsLog)
#QMessageBox.about(self, "List of Operations", operationsLog)
reply = QMessageBox.question(self, "List of Operations", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
return True
else:
return False
def validate_builder(self):
try:
reply = self.resumeOperations()
if reply:
print("Operations will be executed")
self.mainBuilder = MainBuilder(
self.isOfficialVersion,
self.doBuildTdkDesign,
self.doBuildOverlord,
self.doBuildConfiguratore,
self.doBuildTdk,
self.doBuildTdkDeviceDriver,
self.doTdkSetup,
self.doTdkDeviceDriverSetup,
self.versionNumber,
self.doBuildTdk_Debug,
self.doBuildTdkDeviceDriver_Debug)
# self, doOfficial, doOverlord,
# doConfiguratore, doTdk, doTdkDeviceDriver,
# doTdkSetup, doTdkDeviceDriverSetup, versionNumber,
# doTdk_Debug, doTdkDeviceDriver_Debug
self.main_workflow()
else:
print("Operation has been stopped by the operator!")
except MyException as e:
QMessageBox.about(self, "List of Operations - Exception", str(e))
return False
def launch_vb6(self):
self.mainBuilder.buildVb6()
pass
def launch_tdk_design(self):
self.mainBuilder.buildTdkDesign()
pass
def launch_tdk_full(self):
self.mainBuilder.buildTdkFinal()
pass
def launch_tdkdevicedriver(self):
self.mainBuilder.buildTdkDeviceDriver()
pass
def launch_tdk_setup(self):
self.mainBuilder.buildTdkSetup()
pass
def launch_tdkdevicedriver_setup(self):
pass
def main_workflow(self):
self.launch_tdk_design()
self.launch_vb6()
self.launch_tdk_full()
self.launch_tdkdevicedriver()
self.launch_tdk_setup()
self.launch_tdkdevicedriver_setup()
QMessageBox.about(self, "List of Operations", "Operations have been finished!")
pass
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass(None)
myWindow.show()
app.exec_()
| 2.25 | 2 |
ISPProgrammer/NXPChip.py | snhobbs/NXPISP | 3 | 12794141 | import math
import zlib
from time import sleep
import struct
from timeout_decorator import timeout
from timeout_decorator.timeout_decorator import TimeoutError
from pycrc.algorithms import Crc
from .ISPChip import ISPChip
NXPReturnCodes = {
"CMD_SUCCESS" : 0x0,
"INVALID_COMMAND" : 0x1,
"SRC_ADDR_ERROR" : 0x2,
"DST_ADDR_ERROR" : 0x3,
"SRC_ADDR_NOT_MAPPED" : 0x4,
"DST_ADDR_NOT_MAPPED" : 0x5,
"COUNT_ERROR" : 0x6,
"INVALID_SECTOR/INVALID_PAGE" : 0x7,
"SECTOR_NOT_BLANK" : 0x8,
"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION" : 0x9,
"COMPARE_ERROR" : 0xa,
"BUSY" : 0xb,
"PARAM_ERROR" : 0xc,
"ADDR_ERROR" : 0xd,
"ADDR_NOT_MAPPED" : 0xe,
"CMD_LOCKED" : 0xf,
"INVALID_CODE" : 0x10,
"INVALID_BAUD_RATE" : 0x11,
"INVALID_STOP_BIT" : 0x12,
"CODE_READ_PROTECTION_ENABLED" : 0x13,
"Unused 1" : 0x14,
"USER_CODE_CHECKSUM" : 0x15,
"Unused 2" : 0x16,
"EFRO_NO_POWER" : 0x17,
"FLASH_NO_POWER" : 0x18,
"Unused 3" : 0x19,
"Unused 4" : 0x1a,
"FLASH_NO_CLOCK" : 0x1b,
"REINVOKE_ISP_CONFIG" : 0x1c,
"NO_VALID_IMAGE" : 0x1d,
"FAIM_NO_POWER" : 0x1e,
"FAIM_NO_CLOCK" : 0x1f,
"NoStatusResponse" : 0xff,
}
def GetErrorCodeName(code: int) -> str:
code = int(code)
for item in NXPReturnCodes.items():
if code == item[1]:
return item[0]
return "Not Found"
def RaiseReturnCodeError(code: int, call_name: str) -> None:
if int(code) != NXPReturnCodes["CMD_SUCCESS"]:
raise UserWarning(
"Return Code Failure in {} {} {}".format(call_name, GetErrorCodeName(code), code))
def RemoveBootableCheckSum(vector_table_loc: int, image: bytes) -> bytes:
    '''
    Return a copy of the image with the boot checksum word zeroed out
    '''
    kuint32_t_size = 4
    image_list = list(image)  # bytes are immutable, so work on a mutable copy
    for byte in range(kuint32_t_size):
        image_list[vector_table_loc * kuint32_t_size + byte] = 0
    return bytes(image_list)
# 2s complement of the checksum of the frame
def CalculateCheckSum(frame) -> int:
csum = 0
for entry in frame:
csum += entry
return (1<<32) - (csum % (1<<32))
def Crc32(frame) -> int:
    # Standard CRC-32 polynomial 0x04C11DB7, written with the leading bit
    # included as pycrc expects (the x^0 term must be set)
    polynomial = 0x104c11db7
    crc = Crc(width=32, poly=polynomial, reflect_in=True,
              xor_in=(1 << 32) - 1, reflect_out=True, xor_out=0x00)
    crc_calc = crc.bit_by_bit(frame)
    return crc_calc
def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes:
# make this a valid image by inserting a checksum in the correct place
vector_table_size = 8
kuint32_t_size = 4
# Make byte array into list of little endian 32 bit words
intvecs = struct.unpack("<%dI"%vector_table_size,
orig_image[:vector_table_size * kuint32_t_size])
# calculate the checksum over the interrupt vectors
intvecs_list = list(intvecs[:vector_table_size])
intvecs_list[vector_table_loc] = 0 # clear csum value
csum = CalculateCheckSum(intvecs_list)
intvecs_list[vector_table_loc] = csum
vector_table_bytes = b''
for vecval in intvecs_list:
vector_table_bytes += struct.pack("<I", vecval)
return vector_table_bytes
def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes:
vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image)
image = vector_table_bytes + orig_image[len(vector_table_bytes):]
return image
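# Illustrative sketch: MakeBootable and RemoveBootableCheckSum are inverses
# on the checksum slot. Slot 7 (byte offset 0x1c) is assumed here; the
# correct slot comes from the chip definition (kCheckSumLocation), and
# raw_image must contain at least the 8-word vector table.
def _bootable_roundtrip_example(raw_image: bytes) -> bytes:
    bootable = MakeBootable(7, raw_image)
    stripped = RemoveBootableCheckSum(7, bootable)
    assert stripped[7 * 4:8 * 4] == bytes(4)  # checksum word zeroed again
    return bootable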
def FillDataToFitSector(data: bytes, size: int) -> bytes:
if len(data) != size:
data += bytes([0xff] *(size - len(data)))
return data
class NXPChip(ISPChip):
kWordSize = 4
kPageSizeBytes = 64
SectorSizePages = 16
MaxByteTransfer = 1024
StatusRespLength = len(ISPChip.kNewLine) + 1
#Parity = None
#DataBits = 8
#StopBits = 1
SyncString = "Synchronized"+ISPChip.kNewLine
SyncStringBytes = bytes(SyncString, encoding="utf-8")
SyncVerified = bytes("OK"+ISPChip.kNewLine, encoding="utf-8")
ReturnCodes = NXPReturnCodes
CRCLocation = 0x000002fc
CRCValues = {
"NO_ISP": 0x4e697370,
"CRP1" : 0x12345678,
"CRP2" : 0x87654321,
"CRP3" : 0x43218765,
}
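    # The word at CRCLocation (0x2fc) selects the Code Read Protection
    # level: flashing one of the CRCValues above enables the matching CRP
    # mode, and NO_ISP/CRP3 can permanently disable ISP entry.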
kSleepTime = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.CrystalFrequency = 12000  # kHz == 12 MHz
self.SectorCount = 0
self.RAMSize = 0
self.RAMRange = [0, 0]
self.FlashRange = [0, 0]
self.RAMStartWrite = 0
self.kCheckSumLocation = 7 #0x0000001c
    def FlashAddressLegal(self, address):
        return self.FlashRange[0] <= address <= self.FlashRange[1]

    def FlashRangeLegal(self, address, length):
        # First and last byte must lie in flash, the length must fit the
        # part, and writes must start on a page boundary
        return (self.FlashAddressLegal(address)
                and self.FlashAddressLegal(address + length - 1)
                and length <= self.FlashRange[1] - self.FlashRange[0]
                and address % self.kPageSizeBytes == 0)
    def RamAddressLegal(self, address):
        return self.RAMRange[0] <= address <= self.RAMRange[1]

    def RamRangeLegal(self, address, length):
        return (self.RamAddressLegal(address)
                and self.RamAddressLegal(address + length)
                and length <= self.RAMRange[1] - self.RAMRange[0]
                and address % self.kWordSize == 0)
    def GetReturnCode(self) -> int:
        for _ in range(10):
            try:
                resp = self.ReadLine().strip()
                return int(resp)
            except ValueError:
                pass
        return self.ReturnCodes["NoStatusResponse"]
    def AssertReturnCode(self, call_name: str) -> None:
        '''
        Read the return code and raise if it is not CMD_SUCCESS
        '''
        code = self.GetReturnCode()
        RaiseReturnCodeError(code, call_name)
    def Write(self, string: bytes) -> None:
        assert isinstance(string, bytes)
        self.WriteSerial(string)
    def WriteCommand(self, command_string: str) -> int:
        '''
        Send the command string and return the response code
        '''
        self.Write(bytes(command_string + self.kNewLine, encoding="utf-8"))
        return self.GetReturnCode()
def Unlock(self):
'''
Enables Flash Write, Erase, & Go
'''
self.ClearBuffer()
response_code = self.WriteCommand("U 23130")
RaiseReturnCodeError(response_code, "Unlock")
    def SetBaudRate(self, baud_rate: int, stop_bits: int = 1):
        '''
        Set the ISP baud rate; the reachable rates depend on the FAIM
        config, and stop_bits is 1 or 2
        '''
        response_code = self.WriteCommand("B {} {}".format(baud_rate, stop_bits))
        RaiseReturnCodeError(response_code, "Set Baudrate")
    def Echo(self, on: bool = True):
        '''
        When enabled, the ISP echoes back characters sent by the host
        '''
        command = "A 1" if on else "A 0"
        response_code = self.WriteCommand(command)
        RaiseReturnCodeError(response_code, "Set Echo")
    def WriteToRam(self, start: int, data: bytes):
        assert len(data) % self.kWordSize == 0
        assert self.RamRangeLegal(start, len(data))
        print("Write to RAM %d bytes" % len(data))
        response_code = self.WriteCommand("W %d %d" % (start, len(data)))
        RaiseReturnCodeError(response_code, "Write to RAM")
        self.Write(data)  # Stream data after confirmation
        # When the transfer is complete the handler sends OK<CR><LF>
        try:
            print(self.ReadLine())
        except TimeoutError:
            return
    @timeout(4)
    def ReadMemory(self, start: int, num_bytes: int):
        assert num_bytes % self.kWordSize == 0
        assert self.RamRangeLegal(start, num_bytes)
        print("ReadMemory")
        print("R %d %d" % (start, num_bytes))
        response_code = self.WriteCommand("R %d %d" % (start, num_bytes))
        RaiseReturnCodeError(response_code, "Read Memory")
        while len(self.data_buffer_in) < num_bytes:
            self.Read()
        # Command success is sent at the end of the transfer
        data = []
        while self.data_buffer_in:
            data.append(self.data_buffer_in.popleft())
        if len(data) != num_bytes:
            print(data, len(data), num_bytes)
        assert len(data) == num_bytes
        return bytes(data)
def PrepSectorsForWrite(self, start: int, end: int):
try:
response_code = self.WriteCommand("P %d %d"%(start, end))
except Exception:
response_code = self.WriteCommand("P %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Prep Sectors")
def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int):
assert self.RamRangeLegal(ram_address, num_bytes)
assert self.FlashRangeLegal(flash_address, num_bytes)
response_code = self.WriteCommand("C %d %d %d"%(flash_address, ram_address, num_bytes))
RaiseReturnCodeError(response_code, "Copy RAM To Flash")
#sleep(.2)
def Go(self, address: int, thumb_mode: bool = False):
'''
Start executing code at the specified spot
'''
mode = ""
if thumb_mode:
mode = 'T'
response_code = self.WriteCommand("G %d %s"%(address, mode))
RaiseReturnCodeError(response_code, "Go")
def EraseSector(self, start: int, end: int):
response_code = self.WriteCommand("E %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Erase Sectors")
def ErasePages(self, start: int, end: int):
response_code = self.WriteCommand("X %d %d"%(start, end))
RaiseReturnCodeError(response_code, "Erase Pages")
def CheckSectorsBlank(self, start: int, end: int) -> bool:
assert start <= end
response_code = self.WriteCommand("I %d %d"%(start, end))
try:
self.ReadLine()
response = self.ReadLine().strip()
print("Check Sectors Blank response", response)
except TimeoutError:
pass
if response_code not in (NXPReturnCodes["CMD_SUCCESS"], NXPReturnCodes["SECTOR_NOT_BLANK"]):
RaiseReturnCodeError(response_code, "Blank Check Sectors")
return response_code == NXPReturnCodes["CMD_SUCCESS"]
def ReadPartID(self):
response_code = self.WriteCommand("J")
RaiseReturnCodeError(response_code, "Read Part ID")
resp = self.ReadLine()
return int(resp)
def ReadBootCodeVersion(self):
'''
LPC84x sends a 0x1a first for some reason.
        Also, the boot version seems to be reported minor-then-major, not as the docs say
'''
response_code = self.WriteCommand("K")
RaiseReturnCodeError(response_code, "Read Bootcode Version")
minor = self.ReadLine().strip()
major = self.ReadLine().strip()
return "%d.%d"%(int(major), int(minor))
    def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int):
        '''
        Check whether two sections in the memory map are equal
        '''
self.Write(bytes(("M %d %d %d"%(address1, address2, num_bytes) + self.kNewLine), encoding="utf-8"))
response = self.ReadLine()
response_code = int(response[0])
if response_code not in (NXPReturnCodes["CMD_SUCCESS"], NXPReturnCodes["COMPARE_ERROR"]):
RaiseReturnCodeError(response_code, "Compare")
return response_code == NXPReturnCodes["CMD_SUCCESS"]
def ReadUID(self):
response_code = self.WriteCommand("N")
RaiseReturnCodeError(response_code, "Read UID")
uuids = [
self.ReadLine().strip(),
self.ReadLine().strip(),
self.ReadLine().strip(),
self.ReadLine().strip()]
return " ".join(["0x%08x"%int(uid) for uid in uuids])
def ReadCRC(self, address: int, num_bytes: int) -> int:
try:
response_code = self.WriteCommand("S %d %d"%(address, num_bytes))
except TimeoutError:
response_code = self.WriteCommand("S %d %d"%(address, num_bytes))
RaiseReturnCodeError(response_code, "Read CRC")
return int(self.ReadLine().strip())
    def ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode: int = 0) -> list:
assert start < end
assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end))
response_code = self.WriteCommand("Z %d %d %d %d"%(start, end, wait_states, mode))
RaiseReturnCodeError(response_code, "Read Flash Signature")
sig = []
for i in range(4):
sig.append(self.ReadLine().strip())
return sig
def ReadWriteFAIM(self):
response_code = self.WriteCommand("O")
RaiseReturnCodeError(response_code, "Read Write FAIM")
def ResetSerialConnection(self):
self.Flush()
self.Write(bytes(self.kNewLine, encoding="utf-8"))
try:
self.ReadLine()
except TimeoutError:
pass
def InitConnection(self):
self.ResetSerialConnection()
try:
try:
self.SyncConnection()
self.SetCrystalFrequency(self.CrystalFrequency)
except (UserWarning, TimeoutError) as w:
print("Sync Failed", w)
print("Connect to running ISP")
self.ClearSerialConnection()
self.Echo(False)
try:
self.ReadLine()
self.Flush()
self.ClearBuffer()
except TimeoutError:
pass
uid = self.ReadUID()
print("Part UID: %s"%uid)
boot_code_version = self.ReadBootCodeVersion()
print("Boot Code Version: %s"%boot_code_version)
self.SetBaudRate(self.baud_rate)
print("Baudrate set to %d"%self.baud_rate)
except Exception as e:
print(e, type(e))
raise
def SyncConnection(self):
synced = False
self.ClearSerialConnection()
self.Flush()
for i in range(5):
self.Write(bytes('?'*15, encoding="utf-8"))
#self.Write('?' + self.kNewLine)
try:
frame_in = self.ReadLine()
if self.SyncString.strip() in frame_in.strip():
synced = True
break
except TimeoutError:
pass
if not synced:
#Check for SyncString
raise UserWarning("Syncronization Failure")
#self.Flush()
self.Write(self.SyncStringBytes)#echo SyncString
try:
frame_in = self.ReadLine()#discard echo
except TimeoutError:
pass
verified = False
for i in range(3):
try:
frame_in = self.ReadLine()#Should be OK\r\n
if self.SyncVerified.strip() in frame_in:
verified = True
break
except TimeoutError:
pass
if not verified:
raise UserWarning("Verification Failure")
print("Syncronization Successful")
def ClearSerialConnection(self):
self.Write(bytes(self.kNewLine, encoding="utf-8"))
self.ClearBuffer()
self.Flush()
self.Read()
self.ClearBuffer()
self.Flush()
for _ in range(2):
try:
self.ReadLine()
except TimeoutError:
pass
def SetCrystalFrequency(self, frequency_khz: int):
self.Write((bytes("%d"%frequency_khz + self.kNewLine, encoding="utf-8")))
verified = False
for i in range(3):
try:
frame_in = self.ReadLine()#Should be OK\r\n
if self.SyncVerified.strip() in frame_in:
verified = True
break
except TimeoutError:
pass
if not verified:
raise UserWarning("Verification Failure")
def CheckFlashWrite(self, data, flash_address: int) -> bool:
'''
Read Memory and compare it to what was written
'''
data_read = self.ReadMemory(flash_address, len(data))
if len(data) != len(data_read):
raise ValueError("Read Memory received incorrect amount of data")
        if not isinstance(data_read, type(data)):
raise TypeError("data written and data read are of different types")
return data == data_read
def WriteFlashSector(self, sector: int, data: bytes):
ram_address = self.RAMStartWrite
sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages
flash_address = self.FlashRange[0] + sector*sector_size_bytes
print("\nWriting Sector: %d\nFlash Address: %x\nRAM Address: %x\n"%(sector, flash_address, ram_address))
assert len(data) == sector_size_bytes
#data += bytes(sector_size_bytes - len(data))
data_crc = zlib.crc32(data, 0)
try:
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
except Exception:
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
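        # Re-send the data to RAM until the CRC the chip reports for the RAM
        # region matches the locally computed CRC32 of the sector data.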
while ram_crc != data_crc:
sleep(self.kSleepTime)
self.WriteToRam(ram_address, data)
sleep(self.kSleepTime)
ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))
if data_crc != ram_crc:
print("CRC Check failed", data_crc, ram_crc)
else:
break
# Check to see if sector is already equal to RAM, if so skip
try:
            if self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes):
                print("Flash already equal to RAM, skipping write")
                return
        except Exception:
            pass
print("Prep Sector")
self.PrepSectorsForWrite(sector, sector)
sleep(self.kSleepTime)
print("Erase Sector")
self.EraseSector(sector, sector)
sleep(self.kSleepTime)
        assert self.CheckSectorsBlank(sector, sector)
        sleep(self.kSleepTime)
        print("Prep Sector")
        self.PrepSectorsForWrite(sector, sector)
sleep(self.kSleepTime)
print("Write to Flash")
self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes)
sleep(self.kSleepTime)
flash_crc = self.ReadCRC(flash_address, num_bytes=len(data))
assert flash_crc == data_crc
assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes)
def WriteSector(self, sector: int, data: bytes):
#assert data
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
        assert len(data) > 0
filled_data = FillDataToFitSector(data, sector_bytes)
self.WriteFlashSector(sector, filled_data)
sleep(self.kSleepTime)
#assert self.ReadSector(sector) == data_chunk
def WriteBinaryToFlash(self, image_file: str, start_sector: int):
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
with open(image_file, 'rb') as f:
prog = f.read()
image = prog
print("Program Length:", len(prog))
sector_count = int(math.ceil(len(prog)/sector_bytes))
assert start_sector + sector_count <= self.SectorCount
self.Unlock()
for sector in reversed(range(start_sector, start_sector + sector_count)):
print("\nWriting Sector %d"%sector)
data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) * sector_bytes]
self.WriteSector(sector, data_chunk)
sleep(1)
chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])
print("Flash Signature: %s"%chip_flash_sig)
print("Programming Complete.")
def WriteImage(self, image_file: str):
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
#make not bootable
self.Unlock()
self.WriteSector(0, bytes([0xde]*sector_bytes))
with open(image_file, 'rb') as f:
prog = f.read()
#image = RemoveBootableCheckSum(self.kCheckSumLocation, prog)
image = MakeBootable(self.kCheckSumLocation, prog)
print("Program Length:", len(prog))
sector_count = int(math.ceil(len(prog)/sector_bytes))
assert sector_count <= self.SectorCount
for sector in reversed(range(sector_count)):
print("\nWriting Sector %d"%sector)
data_chunk = image[sector * sector_bytes : (sector + 1) * sector_bytes]
self.WriteSector(sector, data_chunk)
chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])
print("Flash Signature: %s"%chip_flash_sig)
print("Programming Complete.")
def FindFirstBlankSector(self) -> int:
for sector in range(self.SectorCount):
if self.CheckSectorsBlank(sector, self.SectorCount - 1):
return sector
return self.SectorCount - 1
def ReadSector(self, sector: int) -> bytes:
sector_bytes = self.SectorSizePages*self.kPageSizeBytes
assert sector_bytes%self.kWordSize == 0
return self.ReadMemory(sector*sector_bytes, sector_bytes)
def ReadImage(self, image_file: str):
blank_sector = self.FindFirstBlankSector()
with open(image_file, 'wb') as f:
for sector in range(blank_sector):
print("Sector ", sector)
f.write(self.ReadSector(sector))
def MassErase(self):
last_sector = self.SectorCount - 1
sleep(1)
self.ClearBuffer()
self.Unlock()
self.PrepSectorsForWrite(0, last_sector)
self.EraseSector(0, last_sector)
print("Checking Sectors are blank")
assert self.CheckSectorsBlank(0, last_sector)
| 2.21875 | 2 |
homework3 [MAIN PROJECT]/project/seller.py | Gaon-Choi/ITE2038_ | 0 | 12794142 | <gh_stars>0
import time
import argparse
from helpers.connection import conn
def parsing_seller(parser:argparse.ArgumentParser):
sub_parsers = parser.add_subparsers(dest='function')
# info
parser_info = sub_parsers.add_parser('info')
parser_info.add_argument('id', type=int)
# update
parser_update = sub_parsers.add_parser('update')
parser_update.add_argument('id', type=int)
parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd'])
parser_update.add_argument('data', type=str)
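# Example invocations matching the sub-parsers above (ID/values are hypothetical):
#   python seller.py info 1
#   python seller.py update 1 name Alice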
def int_check(text):
    try:
        int(text)
        return True
    except ValueError:
        return False
def show_seller_from_table(row):
print("Name: {name}".format(name = row[1]))
print("Phone Number: {phone}".format(phone = row[2]))
print("email: {local}@{domain}".format(local=row[3], domain = row[4]))
def show_seller_info(args):
# TODO
try:
cur = conn.cursor()
sql = "SELECT * FROM seller WHERE id={id};".format(id=args.id)
cur.execute(sql)
rows = cur.fetchall()
if not rows:
print("Given seller ID doesn't exist.")
return
for row in rows:
show_seller_from_table(row)
except Exception as err:
print(err)
def modify_seller_info(args):
# TODO
try:
cur = conn.cursor()
sql = "UPDATE seller " \
"SET {attr} = \'{data}\' " \
"WHERE seller.id={id}".format(attr=args.attr, data=args.data, id=int(args.id))
print(sql)
cur.execute(sql)
conn.commit()
except Exception as err:
print(err)
conn.rollback()
print("modify_seller_info")
if __name__ == "__main__":
start = time.time()
parser = argparse.ArgumentParser()
parsing_seller(parser)
args = parser.parse_args()
if args.function == 'info':
show_seller_info(args)
elif args.function == 'update':
modify_seller_info(args)
else:
parser.print_help()
print("Running Time: ", end="")
print(time.time() - start)
| 2.84375 | 3 |
third_party/blink/renderer/bindings/scripts/idl_types.py | Ron423c/chromium | 575 | 12794143 | <filename>third_party/blink/renderer/bindings/scripts/idl_types.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""IDL type handling.
Classes:
IdlTypeBase
IdlType
IdlUnionType
IdlArrayOrSequenceType
IdlSequenceType
IdlFrozenArrayType
IdlNullableType
IdlAnnotatedType
IdlTypes are picklable because we store them in interfaces_info.
"""
from collections import defaultdict
################################################################################
# IDL types
################################################################################
INTEGER_TYPES = frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-integer-type
'byte',
'octet',
'short',
'unsigned short',
# int and unsigned are not IDL types
'long',
'unsigned long',
'long long',
'unsigned long long',
])
NUMERIC_TYPES = (
INTEGER_TYPES | frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-numeric-type
'float',
'unrestricted float',
'double',
'unrestricted double',
]))
# http://www.w3.org/TR/WebIDL/#dfn-primitive-type
PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES)
BASIC_TYPES = (
PRIMITIVE_TYPES | frozenset([
# Built-in, non-composite, non-object data types
# http://heycam.github.io/webidl/#idl-types
'DOMString',
'ByteString',
'USVString',
# http://heycam.github.io/webidl/#idl-types
'void',
]))
TYPE_NAMES = {
# http://heycam.github.io/webidl/#dfn-type-name
'any': 'Any',
'boolean': 'Boolean',
'byte': 'Byte',
'octet': 'Octet',
'short': 'Short',
'unsigned short': 'UnsignedShort',
'long': 'Long',
'unsigned long': 'UnsignedLong',
'long long': 'LongLong',
'unsigned long long': 'UnsignedLongLong',
'float': 'Float',
'unrestricted float': 'UnrestrictedFloat',
'double': 'Double',
'unrestricted double': 'UnrestrictedDouble',
'DOMString': 'String',
'ByteString': 'ByteString',
'USVString': 'USVString',
'object': 'Object',
}
STRING_TYPES = frozenset([
# http://heycam.github.io/webidl/#es-interface-call (step 10.11)
# (Interface object [[Call]] method's string types.)
'String',
'ByteString',
'USVString',
])
EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([
'AllowShared',
'Clamp',
'EnforceRange',
'StringContext',
'TreatNullAs',
])
################################################################################
# Inheritance
################################################################################
ancestors = defaultdict(list) # interface_name -> ancestors
def inherits_interface(interface_name, ancestor_name):
return (interface_name == ancestor_name
or ancestor_name in ancestors[interface_name])
def set_ancestors(new_ancestors):
ancestors.update(new_ancestors)
class IdlTypeBase(object):
"""Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType
and IdlNullableType.
"""
def __str__(self):
raise NotImplementedError('__str__() should be defined in subclasses')
def __getattr__(self, name):
# Default undefined attributes to None (analogous to Jinja variables).
# This allows us to not define default properties in the base class, and
# allows us to relay __getattr__ in IdlNullableType to the inner type.
return None
def resolve_typedefs(self, typedefs):
raise NotImplementedError(
'resolve_typedefs should be defined in subclasses')
def idl_types(self):
"""A generator which yields IdlTypes which are referenced from |self|,
including itself."""
yield self
################################################################################
# IdlType
################################################################################
class IdlType(IdlTypeBase):
# FIXME: incorporate Nullable, etc.
# to support types like short?[] vs. short[]?, instead of treating these
# as orthogonal properties (via flags).
callback_functions = {}
callback_interfaces = set()
dictionaries = set()
enums = {} # name -> values
def __init__(self, base_type, is_unrestricted=False):
super(IdlType, self).__init__()
if is_unrestricted:
self.base_type = 'unrestricted %s' % base_type
else:
self.base_type = base_type
def __str__(self):
return self.base_type
def __getstate__(self):
return {
'base_type': self.base_type,
}
def __setstate__(self, state):
self.base_type = state['base_type']
@property
def is_basic_type(self):
return self.base_type in BASIC_TYPES
@property
def is_callback_function(self): # pylint: disable=C0103
return self.base_type in IdlType.callback_functions
@property
def is_custom_callback_function(self):
entry = IdlType.callback_functions.get(self.base_type)
callback_function = entry.get('callback_function')
if not callback_function:
return False
return 'Custom' in callback_function.extended_attributes
@property
def is_callback_interface(self):
return self.base_type in IdlType.callback_interfaces
@property
def is_dictionary(self):
return self.base_type in IdlType.dictionaries
@property
def is_enum(self):
# FIXME: add an IdlEnumType class and a resolve_enums step
# at end of IdlDefinitions constructor
return self.name in IdlType.enums
@property
def enum_values(self):
return IdlType.enums.get(self.name)
@property
def enum_type(self):
return self.name if self.is_enum else None
@property
def is_integer_type(self):
return self.base_type in INTEGER_TYPES
@property
def is_void(self):
return self.base_type == 'void'
@property
def is_numeric_type(self):
return self.base_type in NUMERIC_TYPES
@property
def is_primitive_type(self):
return self.base_type in PRIMITIVE_TYPES
@property
def is_interface_type(self):
# Anything that is not another type is an interface type.
# http://www.w3.org/TR/WebIDL/#idl-types
# http://www.w3.org/TR/WebIDL/#idl-interface
# In C++ these are RefPtr types.
return not (self.is_basic_type or self.is_callback_function
or self.is_dictionary or self.is_enum or self.name == 'Any'
or self.name == 'Object' or self.name == 'Promise'
) # Promise will be basic in future
@property
def is_string_type(self):
return self.name in STRING_TYPES
@property
def name(self):
"""Return type name
http://heycam.github.io/webidl/#dfn-type-name
"""
base_type = self.base_type
return TYPE_NAMES.get(base_type, base_type)
@classmethod
def set_callback_functions(cls, new_callback_functions):
cls.callback_functions.update(new_callback_functions)
@classmethod
def set_callback_interfaces(cls, new_callback_interfaces):
cls.callback_interfaces.update(new_callback_interfaces)
@classmethod
def set_dictionaries(cls, new_dictionaries):
cls.dictionaries.update(new_dictionaries)
@classmethod
def set_enums(cls, new_enums):
cls.enums.update(new_enums)
def resolve_typedefs(self, typedefs):
base_type = self.base_type
if base_type in typedefs:
resolved_type = typedefs[base_type]
if resolved_type.base_type in typedefs:
raise ValueError("We can't typedef a typedef'ed type.")
# For the case that the resolved type contains other typedef'ed
# type(s).
return resolved_type.resolve_typedefs(typedefs)
return self
################################################################################
# IdlUnionType
################################################################################
class IdlUnionType(IdlTypeBase):
# http://heycam.github.io/webidl/#idl-union
# IdlUnionType has __hash__() and __eq__() methods because they are stored
# in sets.
def __init__(self, member_types):
super(IdlUnionType, self).__init__()
self.member_types = member_types
def __str__(self):
return '(' + ' or '.join(
str(member_type) for member_type in self.member_types) + ')'
def __hash__(self):
return hash(self.name)
def __eq__(self, rhs):
return self.name == rhs.name
def __getstate__(self):
return {
'member_types': self.member_types,
}
def __setstate__(self, state):
self.member_types = state['member_types']
@property
def flattened_member_types(self):
"""Returns the set of the union's flattened member types.
https://heycam.github.io/webidl/#dfn-flattened-union-member-types
"""
# We cannot use a set directly because each member is an
# IdlTypeBase-derived class, and comparing two objects of the
# same type is not the same as comparing their names.
# In other words:
# x = IdlType('ByteString')
# y = IdlType('ByteString')
# x == y # False
# x.name == y.name # True
# |flattened_members|'s keys are type names, the values are type
# |objects|.
# We assume we can use two IDL objects of the same type interchangeably.
flattened_members = {}
for member in self.member_types:
if member.is_nullable:
member = member.inner_type
if member.is_union_type:
for inner_member in member.flattened_member_types:
flattened_members[inner_member.name] = inner_member
else:
flattened_members[member.name] = member
return set(flattened_members.values())
@property
def number_of_nullable_member_types(self):
"""Returns the union's number of nullable types.
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types
"""
count = 0
for member in self.member_types:
if member.is_nullable:
count += 1
member = member.inner_type
if member.is_union_type:
count += member.number_of_nullable_member_types
return count
@property
def is_union_type(self):
return True
def single_matching_member_type(self, predicate):
matching_types = list(filter(predicate, self.flattened_member_types))
if len(matching_types) > 1:
raise ValueError('%s is ambiguous.' % self.name)
return matching_types[0] if matching_types else None
@property
def string_member_type(self):
return self.single_matching_member_type(
lambda member_type: (member_type.is_string_type or member_type.is_enum)
)
@property
def numeric_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_numeric_type)
@property
def boolean_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.base_type == 'boolean')
@property
def sequence_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_sequence_type)
@property
def dictionary_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_dictionary)
@property
def as_union_type(self):
# Note: Use this to "look through" a possible IdlNullableType wrapper.
return self
@property
def name(self):
"""Return type name (or inner type name if nullable)
http://heycam.github.io/webidl/#dfn-type-name
"""
return 'Or'.join(member_type.name for member_type in self.member_types)
def resolve_typedefs(self, typedefs):
self.member_types = [
member_type.resolve_typedefs(typedefs)
for member_type in self.member_types
]
return self
def idl_types(self):
yield self
for member_type in self.member_types:
for idl_type in member_type.idl_types():
yield idl_type
################################################################################
# IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType
################################################################################
# TODO(bashi): Rename this like "IdlArrayTypeBase" or something.
class IdlArrayOrSequenceType(IdlTypeBase):
"""Base class for array-like types."""
def __init__(self, element_type):
super(IdlArrayOrSequenceType, self).__init__()
self.element_type = element_type
def __getstate__(self):
return {
'element_type': self.element_type,
}
def __setstate__(self, state):
self.element_type = state['element_type']
def resolve_typedefs(self, typedefs):
self.element_type = self.element_type.resolve_typedefs(typedefs)
return self
@property
def is_array_or_sequence_type(self):
return True
@property
def is_sequence_type(self):
return False
@property
def is_frozen_array(self):
return False
@property
def enum_values(self):
return self.element_type.enum_values
@property
def enum_type(self):
return self.element_type.enum_type
def idl_types(self):
yield self
for idl_type in self.element_type.idl_types():
yield idl_type
class IdlSequenceType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlSequenceType, self).__init__(element_type)
def __str__(self):
return 'sequence<%s>' % self.element_type
@property
def name(self):
return self.element_type.name + 'Sequence'
@property
def is_sequence_type(self):
return True
class IdlFrozenArrayType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlFrozenArrayType, self).__init__(element_type)
def __str__(self):
return 'FrozenArray<%s>' % self.element_type
@property
def name(self):
return self.element_type.name + 'Array'
@property
def is_frozen_array(self):
return True
################################################################################
# IdlRecordType
################################################################################
class IdlRecordType(IdlTypeBase):
def __init__(self, key_type, value_type):
super(IdlRecordType, self).__init__()
self.key_type = key_type
self.value_type = value_type
def __str__(self):
return 'record<%s, %s>' % (self.key_type, self.value_type)
def __getstate__(self):
return {
'key_type': self.key_type,
'value_type': self.value_type,
}
def __setstate__(self, state):
self.key_type = state['key_type']
self.value_type = state['value_type']
def idl_types(self):
yield self
for idl_type in self.key_type.idl_types():
yield idl_type
for idl_type in self.value_type.idl_types():
yield idl_type
def resolve_typedefs(self, typedefs):
self.key_type = self.key_type.resolve_typedefs(typedefs)
self.value_type = self.value_type.resolve_typedefs(typedefs)
return self
@property
def is_record_type(self):
return True
@property
def name(self):
return self.key_type.name + self.value_type.name + 'Record'
################################################################################
# IdlNullableType
################################################################################
# https://heycam.github.io/webidl/#idl-nullable-type
class IdlNullableType(IdlTypeBase):
def __init__(self, inner_type):
super(IdlNullableType, self).__init__()
if inner_type.name == 'Any':
raise ValueError('Inner type of nullable type must not be any.')
if inner_type.name == 'Promise':
raise ValueError(
'Inner type of nullable type must not be a promise.')
if inner_type.is_nullable:
raise ValueError(
'Inner type of nullable type must not be a nullable type.')
if inner_type.is_union_type:
if inner_type.number_of_nullable_member_types > 0:
raise ValueError(
'Inner type of nullable type must not be a union type that '
'itself includes a nullable type.')
if any(member.is_dictionary
for member in inner_type.flattened_member_types):
raise ValueError(
'Inner type of nullable type must not be a union type that '
'has a dictionary type as its members.')
self.inner_type = inner_type
def __str__(self):
# FIXME: Dictionary::ConversionContext::setConversionType can't
# handle the '?' in nullable types (passes nullability separately).
# Update that function to handle nullability from the type name,
# simplifying its signature.
# return str(self.inner_type) + '?'
return str(self.inner_type)
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __getstate__(self):
return {
'inner_type': self.inner_type,
}
def __setstate__(self, state):
self.inner_type = state['inner_type']
@property
def is_nullable(self):
return True
@property
def name(self):
return self.inner_type.name + 'OrNull'
@property
def enum_values(self):
# Nullable enums are handled by preprending a None value to the list of
# enum values. This None value is converted to nullptr on the C++ side,
# which matches the JavaScript 'null' in the enum parsing code.
inner_values = self.inner_type.enum_values
if inner_values:
return [None] + inner_values
return None
def resolve_typedefs(self, typedefs):
self.inner_type = self.inner_type.resolve_typedefs(typedefs)
return self
def idl_types(self):
yield self
for idl_type in self.inner_type.idl_types():
yield idl_type
################################################################################
# IdlAnnotatedType
################################################################################
class IdlAnnotatedType(IdlTypeBase):
"""IdlAnnoatedType represents an IDL type with extended attributes.
[Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable
to types.
https://heycam.github.io/webidl/#idl-annotated-types
"""
def __init__(self, inner_type, extended_attributes):
super(IdlAnnotatedType, self).__init__()
self.inner_type = inner_type
self.extended_attributes = extended_attributes
if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES
for key in extended_attributes):
raise ValueError(
'Extended attributes not applicable to types: %s' % self)
if ('StringContext' in extended_attributes
and inner_type.base_type not in ['DOMString', 'USVString']):
raise ValueError(
'StringContext is only applicable to string types.')
def __str__(self):
annotation = ', '.join(
(key + ('' if val is None else '=' + val))
for key, val in self.extended_attributes.items())
return '[%s] %s' % (annotation, str(self.inner_type))
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __getstate__(self):
return {
'inner_type': self.inner_type,
'extended_attributes': self.extended_attributes,
}
def __setstate__(self, state):
self.inner_type = state['inner_type']
self.extended_attributes = state['extended_attributes']
@property
def is_annotated_type(self):
return True
@property
def has_string_context(self):
return 'StringContext' in self.extended_attributes
@property
def name(self):
annotation = ''.join(
(key + ('' if val is None else val))
for key, val in sorted(self.extended_attributes.items()))
return self.inner_type.name + annotation
def resolve_typedefs(self, typedefs):
self.inner_type = self.inner_type.resolve_typedefs(typedefs)
return self
def idl_types(self):
yield self
yield self.inner_type
| 1.726563 | 2 |
widget_row/config.py | chr0nu5/core | 0 | 12794144 | <filename>widget_row/config.py
class Config:
def data(self):
config = {
'name':'Row',
'icon': 'fa fa-square-o',
'tags': 'row, bootstrap',
'preview': False
}
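        # For illustration: Config().data()['name'] == 'Row'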
return config | 2.09375 | 2 |
mipengine/controller/algorithm_execution_DTOs.py | ThanKarab/MIP-Engine | 4 | 12794145 | from pydantic import BaseModel
from typing import List
from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO
from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler
# One of the two expected data object for the AlgorithmExecutor layer.
class AlgorithmExecutionDTO(BaseModel):
context_id: str
algorithm_name: str
algorithm_request_dto: AlgorithmRequestDTO
class Config:
arbitrary_types_allowed = True
# The second expected data object for the AlgorithmExecutor layer.
# It contains the handler objects(essentially the celery objects) on which the
# AlgorithmExecutor will request tasks execution
class NodesTasksHandlersDTO(BaseModel):
global_node_tasks_handler: INodeTasksHandler
local_nodes_tasks_handlers: List[INodeTasksHandler]
class Config:
arbitrary_types_allowed = True
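# Illustrative construction (handler values are hypothetical placeholders):
#   NodesTasksHandlersDTO(
#       global_node_tasks_handler=global_handler,
#       local_nodes_tasks_handlers=[local_handler_a, local_handler_b],
#   )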
| 2.203125 | 2 |
main.py | p-jonczyk/work-list | 2 | 12794146 | <gh_stars>1-10
import openpyxl as xl
import calendar
import work_list
import row_style
import os
import files_handeling
import constants as const
import input_handeling
def main():
# take source file name
    source_file_name = input(
        "\nSource - employees workhour list - file name (with .xlsx extension): ")
# loading the source excel file to get data from
source_file_path = f".\{source_file_name}"
files_handeling.check_required_files_existence(
source_file_path, source_file_name)
source_ws = xl.load_workbook(source_file_path).worksheets[0]
# get data from source file
month_name = source_ws['R1'].value
construction_site = source_ws['D3'].value
# loading the base excel file to save data to
base_file_path = ".\\base.xlsx"
files_handeling.check_required_files_existence(
base_file_path, source_file_name)
base_file = xl.load_workbook(base_file_path)
base_ws = base_file.active
# check output folder existance
    output_folder_path = f".\\{construction_site} employees worklists"
files_handeling.check_required_files_existence(output_folder_path,
construction_site=construction_site)
# different work hour start in month dict
work_hour_start_days = {}
    # welcome message
print(const.welcome_msg)
# count for help msg to be shown
cnt = 0
# get user's input
while True:
try:
# help msg if problems
input_handeling.show_help(cnt)
pause = float(input('Break length: '))
year = int(input('Year: '))
            month = int(input('Month (1 - 12): '))
            if input_handeling.check_input(pause, year, month):
                # get days in month
                days_in_month = calendar.monthrange(year, month)[1]
                print(
                    f"Chosen month has {days_in_month} days.")
# input handeling
input_handeling.handeling_work_hour_days(
work_hour_start_days, days_in_month)
break
else:
cnt += 1
print(const.incorrect_values_msg)
except ValueError:
cnt += 1
print(const.incorrect_values_msg)
# create new directory after chcecking all conditions
os.mkdir(f"{output_folder_path}")
# loading style
base_file.add_named_style(row_style.base)
base_file.add_named_style(row_style.days_off)
# table creation
work_list.fill_worksheet(source_ws, base_ws, month_name, base_file,
month, year, work_hour_start_days, pause, construction_site)
files_handeling.exit()
if __name__ == "__main__":
main()
| 3.109375 | 3 |
mozillians/mozspaces/migrations/0001_initial.py | caktus/mozillians | 0 | 12794147 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MozSpace'
db.create_table('mozspaces_mozspace', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('address', self.gf('django.db.models.fields.CharField')(max_length=300)),
('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=100)),
('country', self.gf('django.db.models.fields.CharField')(max_length=5)),
('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)),
('lon', self.gf('django.db.models.fields.FloatField')()),
('lat', self.gf('django.db.models.fields.FloatField')()),
('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)),
('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])),
))
db.send_create_signal('mozspaces', ['MozSpace'])
# Adding model 'Keyword'
db.create_table('mozspaces_keyword', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])),
))
db.send_create_signal('mozspaces', ['Keyword'])
# Adding model 'Photo'
db.create_table('mozspaces_photo', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])),
))
db.send_create_signal('mozspaces', ['Photo'])
def backwards(self, orm):
# Deleting model 'MozSpace'
db.delete_table('mozspaces_mozspace')
# Deleting model 'Keyword'
db.delete_table('mozspaces_keyword')
# Deleting model 'Photo'
db.delete_table('mozspaces_photo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mozspaces.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keywords'", 'to': "orm['mozspaces.MozSpace']"})
},
'mozspaces.mozspace': {
'Meta': {'object_name': 'MozSpace'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'featured_mozspace'", 'null': 'True', 'to': "orm['mozspaces.Photo']"}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'extra_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mozspaces.photo': {
'Meta': {'object_name': 'Photo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': "orm['mozspaces.MozSpace']"}),
'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['mozspaces']
| 2.078125 | 2 |
basic/print emoji.py | jspw/Basic_Python | 6 | 12794148 | #website : https://unicode.org/emoji/charts/full-emoji-list.html
#to build the escape sequence, replace the '+' in a codepoint like U+1F600 with '000'
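# e.g. codepoint U+1F600 -> "\U0001F600"; equivalently: print(chr(0x1F600))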
print("\U0001F600")
print("\U0001F603")
print("\U0001F604")
print("\U0001F601")
print("\U0001F606")
print("\U0001F605")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("\U0001F602")
print("At the End of the Day : \U0001F595 \b Fuck You !") | 2.90625 | 3 |
rally_ovs/tests/unit/plugins/ovs/context/test_ovn_nb.py | LorenzoBianconi/ovn-scale-test | 15 | 12794149 | <reponame>LorenzoBianconi/ovn-scale-test
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally_ovs.plugins.ovs.context import ovn_nb
from rally_ovs.tests.unit.plugins.ovs import utils
from tests.unit import test
class OvnNorthboundContextTestCase(test.TestCase):
@mock.patch("rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client")
def test_setup(self, mock_create_client):
ovn_nbctl_show_output = """\
switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O)
port lport_c52f4c_LXzXCE
port lport_c52f4c_dkZSDg
switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj)
port lport_c52f4c_cm8SIf
port lport_c52f4c_8h7hn2
switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce)
port lport_c52f4c_X3px3u
port lport_c52f4c_92dhqb
"""
mock_client = mock_create_client.return_value
mock_client.show.return_value = ovn_nbctl_show_output
context = utils.get_fake_context(ovn_nb={})
nb_context = ovn_nb.OvnNorthboundContext(context)
nb_context.setup()
expected_setup_output = ovn_nbctl_show_output
actual_setup_output = nb_context.context["ovn-nb"]
self.assertEqual(expected_setup_output, actual_setup_output)
| 1.796875 | 2 |
aio_parallel_tools/aio_task_pool/__init__.py | Python-Tools/aio_parallel_tools | 0 | 12794150 | """All Supported Task Pools."""
from .aio_fixed_task_pool_simple import AioFixedTaskPoolSimple
from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo
from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority
from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple
from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo
from .aio_autoscale_task_pool_priority import AioAutoScaleTaskPoolPriority
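# Explicit export list (a sketch; assumes these are the intended public names).
__all__ = [
    "AioFixedTaskPoolSimple",
    "AioFixedTaskPoolLifo",
    "AioFixedTaskPoolPriority",
    "AioAutoScaleTaskPoolSimple",
    "AioAutoScaleTaskPoolLifo",
    "AioAutoScaleTaskPoolPriority",
]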
| 1 | 1 |