{
"source": "1923488289/meiduo_mall",
"score": 2
} |
#### File: celery_tasks/sms/tasks.py
```python
from meiduo_mall.libs.yuntongxun.sms import CCP
from celery_tasks.main import celery_app
# Adding this decorator turns the function into a Celery task
@celery_app.task(name='send_tasks', bind=True, retry_backoff=3)
def send_tasks(self, to, datas, tempid):
try:
# print(datas[0])
ccp = CCP()
ccp.send_template_sms(to,datas,tempid)
except Exception as e:
self.retry(exc=e, max_retries=2)
```
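A minimal caller-side sketch of how this task would be queued from view code, assuming a Celery worker is running; the phone number, SMS code and template id below are made-up values:

```python
# Hypothetical usage; the arguments are illustrative only.
from celery_tasks.sms.tasks import send_tasks

# Queue the SMS asynchronously: to=phone number, datas=[code, expiry in minutes], tempid=template id
send_tasks.delay('13800000000', ['123456', 5], 1)
```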
#### File: apps/advertisement/views.py
```python
from django.shortcuts import render
from django.views import View
from goods.models import GoodsCategory, GoodsChannel
from meiduo_mall.utils import category
from .models import Contentcategory, Content
class IndexView(View):
def get(self, request):
categories = category.get_category()
# Queryset of all advertisement (content) categories
contents = Contentcategory.objects.all()
content_dict = {}
for content in contents:
# All enabled advertisements of this category, ordered by sequence
content_dict[content.key] = content.content_set.filter(status=True).order_by('sequence')
context = {
'categories': categories,
'contents':content_dict,
}
return render(request, 'index.html', context)
```
#### File: apps/auoth/views.py
```python
import re
from django.contrib.auth import login
from django.shortcuts import render, redirect
from django.views import View
from QQLoginTool.QQtool import OAuthQQ
from django.conf import settings
from django import http
from meiduo_mall.utils.response_code import RETCODE
from auoth.models import QAuthQQUser
from meiduo_mall.utils import itsdangerous
from . import constants
from django_redis import get_redis_connection
from users.models import User
# Request the QQ authorization URL from the server
class QQcodeView(View):
def get(self, request):
# Page to return to after a successful login
next_url = request.GET.get('next')
# Create the OAuth helper object
qq_tool = OAuthQQ(
# client id, the unique identifier of the application
settings.QQ_CLIENT_ID,
# client secret that goes with the app id
settings.QQ_CLIENT_SECRET,
# callback URL configured in the application
settings.QQ_REDIRECT_URI,
# state: where to go back to after login
next_url
)
# Build the authorization login URL
login_url = qq_tool.get_qq_url()
print(login_url + '------------------')
return http.JsonResponse(
{
'code': RETCODE.OK,
'errmsg': 'ok',
# authorization URL, i.e. the QQ login / QR-code page
'login_url': login_url
}
)
class OpendidView(View):
def get(self, request):
# The code can only be used once; to use it again a new one must be obtained
code = request.GET.get('code')
next_url = request.GET.get('state')
# Create the OAuth helper object
qq_tool = OAuthQQ(
# client id
settings.QQ_CLIENT_ID,
# client secret
settings.QQ_CLIENT_SECRET,
# callback URL
settings.QQ_REDIRECT_URI,
# state: where to go back to after login
next_url
)
try:
# Obtain the access token
token = qq_tool.get_access_token(code)
# Obtain the openid, the user's unique identifier for this site/application
openid = qq_tool.get_open_id(token)
except Exception:
# return http.HttpResponse(openid)
return http.HttpResponseBadRequest('OAuth2.0认证错误')
else:
try:
oauth_user = QAuthQQUser.objects.get(openid=openid)
except:
# The openid is not yet bound to a Meiduo account
# First-time authorization
# access_token = generate_eccess_token(openid)
# Sign the openid so it can be passed back safely through the form
token = itsdangerous.dumps({'openid': openid}, constants.OPENID_EXPIRES)
context = {'token': token}
return render(request, 'oauth_callback.html', context)
pass
else:
# Use the foreign key to get the bound user
qq_user = oauth_user.user
# Keep the login state (session)
login(request, qq_user)
response = redirect('/')
response.set_cookie('username', qq_user.username, max_age=3600 * 24 * 15)
return response
def post(self, request):
# Receive parameters
mobile = request.POST.get('mobile')
password = request.POST.get('pwd')
token = request.POST.get('access_token')
sms_code = request.POST.get('sms_code')
# Validate
print('------mobile====' + mobile)
print('=======password++++++' + password)
print('===========token===' + token)
if not all([mobile, password, sms_code]):
return http.HttpResponseBadRequest('缺少必须参数')
if not re.match(r'^1[345789]\d{9}', mobile):
return http.HttpResponseBadRequest('您输入的手机号格式不正确')
if not re.match(r'[0-9A-Za-z]{8,20}', password):
return http.HttpResponseBadRequest('密码错误')
# Get the Redis connection
redis_cli = get_redis_connection('verificat_code')
#
redis_cli_request = redis_cli.get('sms_' + mobile)
if redis_cli_request is None:
return http.HttpResponseBadRequest('短信验证码过期')
if redis_cli_request.decode() != sms_code:
return http.HttpResponseBadRequest('短信验证码错误')
# Decrypt/verify the token
json = itsdangerous.loads(token, constants.OPENID_EXPIRES)
if json is None:
return http.HttpResponse('授权信息已经过期')
openid = json.get('openid')
# Process
try:
user = User.objects.get(mobile=mobile)
except:
user = User.objects.create_user(username=mobile, password=password, mobile=mobile)
login(request, user)
response = redirect('/')
response.set_cookie('username', user.username, max_age=60 * 60 * 24 * 14)
return response
else:
# Verify the password
if not user.check_password(password):
return http.HttpResponseBadRequest('帐号的信息失效')
# Write the binding to the database
QAuthQQUser.objects.create(openid=openid, user=user)
login(request, user)
response = redirect('/')
response.set_cookie('user', user.username, max_age=60 * 60 * 24 * 14)
return response
# class OauthView(View):
# def get(self, request):
# return render(request, 'oauth_callback.html')
```
#### File: apps/goods/views.py
```python
from django.shortcuts import render
from django.views.generic import View
from .models import GoodsCategory
from meiduo_mall.utils.category import get_category
# Pagination
from django.core.paginator import Paginator
# Create your views here.
class ListView(View):
def get(self, request, category_id, page_num):
# Look up the specified category object
try:
category3 = GoodsCategory.objects.get(pk=category_id)
except:
return render(request, '404.html')
categories=get_category()
category2=category3.parent
category1=category2.parent
breadcrumb={
'cat1':{
'name':category1.name,
'url':category1.goodchannel_set.all()[0].url
},
'cat2':category2,
'cat3':category3
}
skus=category3.sku_set.filter(is_launched=True)
sort = request.GET.get('sort', 'default')
# Sort by price
if sort == 'price':
skus = skus.order_by('price')
# Sort by popularity
elif sort == 'hot':
skus = skus.order_by('-sales')
# Default ordering
else:
skus = skus.order_by('-id')
# Number of items per page
paginator = Paginator(skus, 5)
# Fetch the requested page
page_skus=paginator.page(page_num)
context={'categories':categories,
'breadcrumb':breadcrumb,
'sort':sort,
'page_skus':page_skus,
'category':category3
}
return render(request, 'list.html',context)
```
#### File: apps/users/views.py
```python
import json
from .models import Address
from django.shortcuts import render, redirect
from django.views.generic import View
from django import http
import re
from django.contrib.auth import login, logout, authenticate
from django_redis import get_redis_connection
from django.contrib.auth.mixins import LoginRequiredMixin
from meiduo_mall.utils.LogMixin import LoginRequiredJSONMixin
from meiduo_mall.utils.response_code import RETCODE
from celery_tasks.email.tasks import send_virify_email
from django.conf import settings
from meiduo_mall.utils import itsdangerous
from . import contants
# from pymysql import DatabaseError
from users.models import User
class MyClass(View):
def get(self, request):
return render(request, 'register.html')
def post(self, request):
username = request.POST.get('user_name')
password = request.POST.get('<PASSWORD>')
password2 = request.POST.get('<PASSWORD>')
mobile = request.POST.get('phone')
allow = request.POST.get('allow')
sms_code = request.POST.get('msg_code')
# Make sure all required parameters are present
if not all([username, password, password2, mobile, allow, sms_code]):
return http.HttpResponseForbidden('缺少必传参数')
# The username must be 5-20 characters
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return http.HttpResponseForbidden('请输入5-20个字符的用户名')
if User.objects.filter(username=username).count() > 0:
return http.HttpResponseForbidden('用户名已经存在')
# The password must be 8-20 letters or digits
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return http.HttpResponseForbidden('请输入8-20位的密码')
# The two passwords must match
if password != password2:
return http.HttpResponseForbidden('两次输入的密码不一致')
# Validate the mobile number
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('请输入正确的手机号码')
if User.objects.filter(mobile=mobile).count() > 0:
return http.HttpResponseForbidden('手机号已经存在')
# Get the Redis connection
redis_conn = get_redis_connection('verificat_code')
# Read the stored SMS verification code
redis_server_m = redis_conn.get('sms_' + mobile)
# print(redis_server_m+'redis')
if redis_server_m is None:
return http.HttpResponseBadRequest('短信验证码过期')
if sms_code != redis_server_m.decode():
return http.HttpResponseBadRequest('短信验证码不正确')
redis_conn.delete('sms_' + mobile)
# create_user() writes the user to the database and hashes the password first.
# Unlike create(), create_user() comes from the auth user manager (AbstractUser),
# which provides the password-hashing logic; a plain create() would not hash it.
user = User.objects.create_user(
username=username,
password=password,
mobile=mobile
)
# Keep login state: the registration info is stored in the session,
# and the session is kept in the cache backend (Redis in this project)
login(request, user)
response = redirect('/')
# Store the username in a browser cookie
response.set_cookie('username', username, 60 * 60 * 24 * 14)
return response
# Username availability check
class UserView(View):
def get(self, request, username):
count = User.objects.filter(username=username).count()
# JsonResponse serializes the data to a JSON string and sets Content-Type to application/json
return http.JsonResponse({'count': count})
# Mobile number availability check (AJAX)
class MobilView(View):
def get(self, request, phone):
count = User.objects.filter(mobile=phone).count()
return http.JsonResponse(
{'count': count}
)
# Login view, /login/
class LoginView(View):
def get(self, request):
return render(request, 'login.html')
def post(self, request):
username = request.POST.get('username')
pwd = request.POST.get('pwd')
# If the user clicked login directly there is no next parameter, so default to '/';
# if they were sent here from the user center, next is set and we go back there after login
next_url = request.GET.get('next', '/')
if not all([pwd, username]):
return http.HttpResponseBadRequest('缺少必填参数')
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return http.HttpResponseForbidden('请输入5-20个字符的用户名')
if not re.match(r'^[0-9A-Za-z]{8,20}$', pwd):
return http.HttpResponseForbidden('请输入8-20位的密码')
# Authenticate: the configured authentication backends verify the hashed password
user = authenticate(username=username, password=pwd)
print(username + '=======' + pwd)
print(user)
if user is None:
return render(request, 'login.html', {
'loginerror': '用户名或者密码错误'
})
else:
print('-------------------------')
# Keep login state
login(request, user)
# return render(request, '/')
# return http.HttpResponse('ok')
response = redirect(next_url)
# Set the username cookie in the browser
response.set_cookie('username', username, 60 * 60 * 24 * 14)
return response
# Logout view, /logout
class LogoutView(View):
def get(self, request):
response = redirect('/login/')
# Essentially deletes the session
logout(request)
response.delete_cookie('username')
return response
# User center, /info: if the user is not logged in, redirect to the login page.
# LoginRequiredMixin already implements that check internally;
# the redirect target is configured in settings (LOGIN_URL)
class InfoView(LoginRequiredMixin, View):
def get(self, request):
# Check whether the user is logged in (handled by the mixin)
# if not request.user.is_authenticated:
# return redirect('/login/')
context = {
'username': request.user.username,
'mobile': request.user.mobile,
'email': request.user.email,
'email_active': request.user.email_active
}
return render(request, 'user_center_info.html', context)
# Custom mixin that rejects requests from users who are not logged in
class EmailView(LoginRequiredJSONMixin, View):
def put(self, request):
# The request body arrives as bytes, so decode it to a string and parse the JSON into a dict
json_dict = json.loads(request.body.decode())
email = json_dict.get('email')
# Validate parameters
if not email:
return http.JsonResponse(
{
'code': RETCODE.DBERR,
'errmsg': '没有邮箱数据'
}
)
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.JsonResponse(
{
'code': RETCODE.DBERR,
'errmsg': '邮箱格式错误'
}
)
try:
user = request.user
user.email = email
user.save()
except:
return http.JsonResponse(
{
'code': RETCODE.DBERR,
'errmsg': '添加邮箱失败'
}
)
# Sign the user id into a token
token = itsdangerous.dumps({'user_id': user.id}, contants.EMAIL_ACTIVE_EXPIRES)
# Build the URL used to activate the user's email address
url = settings.EMAIL_ACTIVE_URL + '?token=%s' % token
print(url)
send_virify_email.delay(email, url)
print('-------------===========')
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': '添加邮箱成功'
})
class JFEmailView(View):
def get(self, request):
token = request.GET.get('token')
# print('=========-----------' + token)
# Make sure the token parameter is present
if not token:
return http.HttpResponseBadRequest('缺少参数token')
# Decode and verify the token
token = itsdangerous.loads(token, contants.EMAIL_ACTIVE_EXPIRES)
# print('==============' + token)
if token is None:
return http.HttpResponseBadRequest('参数已经过期')
user_id = token.get('user_id')
try:
user = User.objects.get(pk=user_id)
except:
return http.HttpResponseBadRequest('邮箱激活失败')
user.email_active = True
user.save()
return redirect('/info/')
class AddressView(LoginRequiredMixin, View):
def get(self, request):
user = request.user
address_1 = Address.objects.filter(user=user, is_delete=False)
# if address is None:
# return http.JsonResponse({
# 'code': RETCODE.PARAMERR,
# 'errmsg': '没有设置收获地址'
# })
addresses = []
for address in address_1:
addresses.append(address.dict_s())
print(addresses)
context = {
'addresses': addresses,
'default_address_id': user.default_address_id
}
print(context)
return render(request, 'user_center_site.html', context)
class AddressCreateView(LoginRequiredMixin, View):
def post(self, requset):
# user is whoever is currently logged in
user = requset.user
if user.addresses.count() > 20:
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '地址数量数量超过20'
})
dicts = json.loads(requset.body.decode())
receiver = dicts.get('receiver')
province_id = dicts.get('province_id')
city_id = dicts.get('city_id')
district_id = dicts.get('district_id')
detail = dicts.get('place')
mobile = dicts.get('mobile')
tel = dicts.get('tel')
email = dicts.get('email')
if not all([receiver, province_id, city_id, district_id, detail, mobile]):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '缺少必须的参数'
})
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '手机号格式不正确'
})
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '固定电话格式不正确'
})
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '邮箱格式不正确'
})
address = Address.objects.create(
user=user,
title=receiver,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
detail=detail,
mobile=mobile,
tel=tel,
email=email
)
# If the user has no default shipping address yet, make this one the default
if user.default_address is None:
user.default_address = address
user.save()
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': 'ok',
'address': address.dict_s()
})
class UpdateAddressView(LoginRequiredMixin, View):
def put(self, request, address_id):
dicts = json.loads(request.body.decode())
receiver = dicts.get('receiver')
province_id = dicts.get('province_id')
city_id = dicts.get('city_id')
district_id = dicts.get('district_id')
detail = dicts.get('place')
mobile = dicts.get('mobile')
tel = dicts.get('tel')
email = dicts.get('email')
if not all([receiver, province_id, city_id, district_id, detail, mobile]):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '缺少必须的参数'
})
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '手机号格式不正确'
})
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '固定电话格式不正确'
})
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '邮箱格式不正确'
})
try:
Address.objects.filter(pk=address_id).update(
user=request.user,
title=receiver,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
detail=detail,
mobile=mobile,
tel=tel,
email=email
)
except:
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '更新地址失败'
})
address = Address.objects.get(pk=address_id)
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': '修改成功',
'address': address.dict_s()
})
def delete(self, request, address_id):
try:
Address.objects.filter(pk=address_id).update(
is_delete=True
)
except:
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '删除失败'
})
else:
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': '删除成功'
})
class DefaultAddressView(LoginRequiredMixin, View):
def put(self, request, address_id):
try:
address = Address.objects.get(pk=address_id)
request.user.default_address = address
request.user.save()
except:
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '设置默认地址失败'
})
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': '设置成功'
})
class UpdateTitleAddressView(LoginRequiredMixin, View):
def put(self, request, address_id):
dicts = json.loads(request.body.decode())
title = dicts.get('title')
try:
Address.objects.filter(pk=address_id).update(title=title)
except:
return http.JsonResponse({
'code': RETCODE.PARAMERR,
'errmsg': '地址标签设置失败'
})
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': '地址标签设置成功'
})
class ChangePasswordView(LoginRequiredMixin, View):
def get(self, request):
return render(request, 'user_center_pass.html')
def post(self, request):
old_pwd = request.POST.get('old_pwd')
password = request.POST.get('new_pwd')
password1 = request.POST.get('new_cpwd')
if not all([old_pwd, password, password1]):
return http.HttpResponseBadRequest('缺少必要参数')
# if not re.match(r'^[0-9A-Za-z]{8,20}$',old_pwd):
# return http.HttpResponseBadRequest('密码格式不正确')
if not request.user.check_password(old_pwd):
return render(request, 'user_center_pass.html', {'origin_pwd_errmsg': '原始密码错误'})
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return http.HttpResponseBadRequest('新密码格式不正确')
if password != password1:
return http.HttpResponseBadRequest('两次密码不一样')
try:
request.user.set_password(password)
request.user.save()
except:
return http.HttpResponseBadRequest('新密码设置失败')
logout(request)
response=redirect('/login/')
response.delete_cookie('username')
return response
```
#### File: meiduo_mall/utils/category.py
```python
from meiduo_mall.apps.goods.models import GoodsChannel
def get_category():
# Channel / category information
# Query all channels, ordered by group and sequence
channels = GoodsChannel.objects.order_by('group_id', 'sequence')
categories = {}
# Iterate over the channels to collect first- and second-level categories
for channel in channels:
if channel.group_id not in categories:
# Create a new group entry if it does not exist yet
categories[channel.group_id] = {
'channels': [], # first-level channels
'sub_cats': [] # second-level categories
}
channel_dict = categories[channel.group_id]
channel_dict['channels'].append({
'name': channel.category.name, # name of the first-level category
'url': channel.url # channel URL
})
# Add the second-level categories under this channel group
catetory2s = channel.category.subs.all()
# Iterate and append each second-level category
for catetory2 in catetory2s:
channel_dict['sub_cats'].append({
'name': catetory2.name, # name of the second-level category
'sub_cats': catetory2.subs.all() # third-level categories
})
return categories
``` |
{
"source": "1923851861/Oldboy_SH_Python",
"score": 3
} |
#### File: Oldboy_SH_Python/day16/spam.py
```python
__all__=['read1','money']
money=1000
def read1():
print('spam模块:',money)
def read2():
print('spam模块')
read1()
def change():
global money
money=0
```
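A short illustration of what `__all__` does in this module: with a star import only the listed names are exposed, so `read1` and `money` come through while `read2` and `change` do not (they remain reachable as attributes of the module object).

```python
# Illustrative only; assumes spam.py is on the import path.
from spam import *

read1()        # works: 'read1' is listed in __all__
print(money)   # works: 'money' is listed in __all__
# read2()      # NameError: 'read2' is not exported by the star import
import spam
spam.read2()   # still reachable through the module object
```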
#### File: Oldboy_SH_Python/day35/01 GIL.py
```python
from threading import Thread,Lock
import time
mutex=Lock()
n=100
def task():
global n
mutex.acquire()
temp=n
time.sleep(0.1)
n=temp-1
mutex.release()
if __name__ == '__main__':
t_l=[]
start_time=time.time()
for i in range(3):
t=Thread(target=task)
t_l.append(t)
t.start()
for t in t_l:
t.join()
stop_time=time.time()
print(n)
print(stop_time-start_time)
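# Expected behaviour (illustrative; exact timing varies by machine): the lock serializes
# the three threads, so each decrement sees the previous thread's result:
#   n            -> 97
#   elapsed time -> a bit over 0.3s (three serialized 0.1s sleeps)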
``` |
{
"source": "1924zjy0835/OpenCV",
"score": 3
} |
#### File: OpenCV/P1-test02/color_space.py
```python
import cv2 as cv
# Read a local image
# def image_read():
# In OpenCV's HSV format H (hue) is in [0, 179], S (saturation) is in [0, 255] and V (value/brightness) is in [0, 255].
# Different software may use different HSV ranges, so values need to be normalized accordingly.
# Color space conversion
def color_space_demo(image):
# cv.cvtColor(input_image, flag): the flag selects the target conversion
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) # BGR to grayscale uses cv2.COLOR_BGR2GRAY
cv.imshow("gray image", gray)
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV) # BGR to HSV uses cv2.COLOR_BGR2HSV
# All available conversion flags can be listed with:
# flags = [i for i in dir(cv) if i.startswith("COLOR_")]
# print(flags)
cv.imshow("hsv image", hsv)
yuv = cv.cvtColor(image, cv.COLOR_BGR2YUV)
cv.imshow("yuv image", yuv)
Ycrcb = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
cv.imshow("ycrcb image", Ycrcb)
image = cv.imread("./images/photo01.jpg")
# cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("show image", image)
# color_space_demo(image)
cv.waitKey(0)
cv.destroyAllWindows()
``` |
{
"source": "19-2-SKKU-OSS/2019-2-OSS-L4",
"score": 3
} |
#### File: 2019-2-OSS-L4/sports/mostCommentsArticle.py
```python
import time, os
import pandas as pd
from selenium import webdriver
class TrendCrawler:
def __init__(self):
self.data = None
def crawling(self):
# Create the data directory
save_dir = "data"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# 홈 URL
URL = 'https://sports.news.naver.com/index.nhn'
# 셀레니움으로 창 열기
#driver는 url을 복사할 창, driver2는 복사된 링크로 들어가서 기사 내용 가져올 창
driver = webdriver.Chrome()
driver2 = webdriver.Chrome()
driver.get(URL)
# Politics, economy, IT, etc.
articles = driver.find_elements_by_css_selector("#mostCommentedNewsList > li > a")
data_list = []
for article in articles:
driver2.get(article.get_attribute("href"))
title = driver2.find_element_by_css_selector("#content > div > div.content > div > div.news_headline > h4").text
content = driver2.find_element_by_css_selector("#newsEndContents").text
comments_elements = driver2.find_elements_by_css_selector(".u_cbox_list > li")
comments = []
for i, comment in enumerate(comments_elements):
if i == 5:
break
comments.append(comment.text)
# like = driver2.find_element_by_css_selector("#spiLayer > div._reactionModule.u_likeit > ul > li.u_likeit_list.good > a > span.u_likeit_list_count._count").text
# hate = driver2.find_element_by_css_selector("#spiLayer > div._reactionModule.u_likeit > ul > li.u_likeit_list.angry > a > span.u_likeit_list_count._count").text
# reactions=[]
# reactions.append(like)
# reactions.append(hate)
data = {}
data['title'] = title
data['contents'] = content
data['comments'] = comments
# data['reactions'] = reactions
data_list.append(data)
self.data = data_list
def save_csv(self, save_path):
df = pd.DataFrame(self.data)
pd.DataFrame.to_csv(df, save_path)
if __name__ == "__main__":
crawler = TrendCrawler()
crawler.crawling()
crawler.save_csv("data.csv")
``` |
{
"source": "19-2-SKKU-OSS/2019-2-OSS--L9-",
"score": 3
} |
#### File: algorithms/maths/add.py
```python
def add() :
a, b = map(int, input().split())
print(a+b)
```
#### File: algorithms/search/left_leaf_nodes.py
```python
import sys
sys.stdin = open("input.txt", 'r')
def preorder(node):
global cnt
if node.child == []:
cnt += 1
for child in node.child:
preorder(tree[child])
class Node:
def __init__(self):
self.child = []
def setChild(self, node):
self.child.append(node)
def removeChild(self, node):
self.child.remove(node)
N = int(input())
tree = [Node() for _ in range(N)]
cnt = 0
parent = list(map(int, input().split()))
for i in range(N):
if parent[i] != -1:
tree[parent[i]].setChild(i)
if N != 1:
i = int(input())
if parent[i] == -1:
cnt = 0
else:
tree[parent[i]].removeChild(i)
preorder(tree[parent.index(-1)])
print(cnt)
``` |
{
"source": "19-2-SKKU-OSS/L1",
"score": 4
} |
#### File: Python/AStarSearch/astar2.py
```python
class Queue:
def __init__(self):
self.items = []
def enqueue(self, node):
self.items.append(node)
def sortedEnqueue(self, node):
i = 0
while i < len(self.items) and self.items[i].f <= node.f:
i = i + 1
self.items.insert(i, node)
def dequeue(self):
return self.items.pop(0) # updated
def isEmpty(self):
if len(self.items) == 0 : return True
else: return False
class Problem:
def __init__(self, i, g, m):
self.initState = i
self.goalState = g
self.model = m
def isGoal(self, s):
if self.goalState == s:
return True
else:
return False
def sucFN(self, city):
return self.model.get(city, [])
class Node:
def __init__(self,s, p, c, d, h):
self.state = s
self.parent = p
self.cost = c
self.depth = d
self.f = h + self.cost
def solutionPath(self):
path = self.state
if self.depth == 0:
return path
else:
return path + ' <-- ' + self.parent.solutionPath()
def __str__(self):
return 'S: ' + self.state + ', depth = ' + str(self.depth) + ', cost = ' + str(self.cost)
class Astar:
def __init__(self, p, hmap):
self.numberGeneratedNodes = 0
self.prob = p
self.frontier = Queue()
self.visited = set()
self.hmap = hmap
def expand(self, parent):
children = []
for i in self.prob.sucFN(parent.state):
s = Node(i[0], parent, i[1] + parent.cost, parent.depth + 1, self.hmap[i[0]] )
print('CHILD generated', i[0])
children.append(s)
self.numberGeneratedNodes += len(children)
return children
def solution(self):
root = Node(self.prob.initState, None, 0, 0, self.hmap[self.prob.initState])
self.frontier.enqueue(root)
while not self.frontier.isEmpty():
parent = self.frontier.dequeue()
self.visited.add(parent.state)
if self.prob.isGoal(parent.state):
return parent
expandedNodes = self.expand(parent)
for i in expandedNodes:
print('CHECKING CHILD', i.state)
if i.state not in self.visited:
self.frontier.sortedEnqueue(i)
print('CHILD ADDED')
return False
```
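A small usage sketch of the classes above on a made-up weighted graph (the graph, heuristic values and node names are illustrative, not from the original repo):

```python
# Hypothetical example: model maps a state to (successor, step cost) pairs,
# hmap gives a heuristic estimate per state.
model = {
    'A': [('B', 1), ('C', 4)],
    'B': [('C', 2), ('D', 5)],
    'C': [('D', 1)],
}
hmap = {'A': 3, 'B': 2, 'C': 1, 'D': 0}

problem = Problem('A', 'D', model)
searcher = Astar(problem, hmap)
goal_node = searcher.solution()
if goal_node:
    print(goal_node.solutionPath())  # D <-- C <-- B <-- A
    print('cost =', goal_node.cost)  # cost = 4
```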
#### File: Python/BreadthFirstSearch/bfs2.py
```python
class Bfs:
def __init__(self, graph, nodes):
self.graph = graph
self.nodes = nodes
self.visited = [False for i in range(nodes)]
self.queue = [0]
def bfs(self):
while self.queue:
node = self.queue.pop(0)
self.visit(node)
for node in range(self.nodes):
if not self.visited[node]:
self.queue.append(node)
self.bfs()
def visit(self, node):
self.visited[node] = True
print (node)
for neighbour in self.graph[node]:
if not self.visited[neighbour]:
self.visited[neighbour] = True
self.queue.append(neighbour)
# graph = [[1,3], [2], [], [2], [7], [6,7], [7], [], []]
# nodes = 9
# makeBFS = Bfs(graph, nodes)
# makeBFS.bfs()
```
#### File: Python/CoinChange/CoinChange.py
```python
import copy
returnList = []
# fillCoin function
# This function fill payment top to bottom way.
# First, fill payment with largest coin
# Second, pop largest coin one by one.
# Last, call fillCoin function with second largest coin
# Repeat these process untill call fillCoin function with the smallest coin.
def fillCoin(current, coinType, result, restPayment):
max_coins = restPayment//coinType[current]
coin = coinType[current]
# Check case that restPayment != 0 but current coin can't fill payment
# Check whether it can call smaller coin
if(restPayment < coin):
if current < len(coinType)-1:
fillCoin(current + 1, coinType, result, restPayment)
max_coins = -1
else: return
# Fill payment with current coin as much as possible
for i in range(max_coins+1):
result.append(coin)
restPayment -= coin*(max_coins+1)
for i in range(max_coins+1):
result.pop()
restPayment += coin
# When restPayment == 0, add it to list which have way to pay
if restPayment == 0:
returnList.append(copy.deepcopy(result))
else:
if current < len(coinType) - 1:
fillCoin(current + 1, coinType, result, restPayment)
# coinChange function
# It control fillCoin function
def coinChange(coins, payment):
list = []
result = []
sorted_coins = sorted(coins, reverse=True)
fillCoin(0, sorted_coins, list, payment)
# Main
coins = [100,500]
n = 1200
coinChange(coins, n)
print(returnList)
```
#### File: Python/CountingInversions/CountingInversions.py
```python
def countingInversions(list):
result = []
list_length = len(list)
for i in range(list_length - 1):
for j in range(i + 1, list_length):
if (list[i]>list[j]):
result.append([list[i], list[j]])
return result
# Main
input = [6, 7, 2, 5, 1, 4, 3]
print(countingInversions(input))
```
#### File: Python/InsertionSort/insertionsort.py
```python
def insertion_sort(list):
i = 1
for i in range(i, len(list)):
k = list[i]
j = i - 1
while j >= 0 and list[j] > k:
list[j+1] = list[j]
j = j - 1
list[j+1] = k
return list
array = [1, 5, 65, 23, 57, 1232, -1, -5, -2, 242, 100,
4, 423, 2, 564, 9, 0, 10, 43, 64, 32, 1, 999]
print(array)
insertion_sort(array)
print(array)
``` |
{
"source": "1930346/SmartNet",
"score": 2
} |
#### File: SmartNet/routes/product_category.py
```python
import datetime
from sqlalchemy.sql import func
from fastapi import APIRouter, Response, status
# This only tells us where to connect; there is no schema here
from config.db import conn
# Here we bring in the table schema
from models.product_categories import product_categories
# Pydantic schemas used to create/read/update a product category
from schemas.product_category import Product_category, Product_category_outs, Product_category_update, Product_category_in
# Module that provides an encryption utility
from cryptography.fernet import Fernet
# For the HTTP status codes
from starlette.status import HTTP_204_NO_CONTENT
product_category = APIRouter()
"""
Endpoint para obtener todos los product_categories
@return: lista de product_categories
"""
#Obtiene todos los product_categories
@product_category.get("/product_categories", response_model=list[Product_category_outs], tags=["product_categories"])
def get_product_categories():
return conn.execute(product_categories.select()).fetchall()
"""
Endpoint para obtener un product_category a través de un ID
@param: id_product_category: id del product_category
@return: un product_category
"""
#Obtiene un product_category por id
@product_category.get("/product_categories/{id}", response_model=Product_category_outs, tags=["product_categories"])
def get_product_category(id: str):
return conn.execute(product_categories.select().where(product_categories.c.id == id)).first()
"""
Endpoint para crear un product_category
@param: product_category: información del product_category
@return: un product_category
"""
#Creación de un product_category
@product_category.post("/product_categories", response_model=Product_category_outs, tags=["product_categories"])
def create_product_category(product_category: Product_category_in):
new_product_category = {
"name": product_category.name,
"description": product_category.description
}
result = conn.execute(product_categories.insert().values(new_product_category))
return conn.execute(product_categories.select().where(product_categories.c.id == result.lastrowid)).first()
"""
Endpoint para borrar un product_category
@param: product_category: información del product_category
@return: HTTP_204_NO_CONTENT
"""
#Eliminación de un product_category
@product_category.delete("/product_categories/{id}", status_code=status.HTTP_204_NO_CONTENT, tags=["product_categories"])
def delete_product_category(id: str):
conn.execute(product_categories.delete().where(product_categories.c.id == id))
return Response(status_code=HTTP_204_NO_CONTENT)
"""
Endpoint para actualizar un product_category
@param: product_category: información del product_category
@return: un product_category
"""
#Actualización de un product_category
@product_category.put("/product_categories/{id}", response_model=Product_category_outs, tags=["product_categories"])
def update_product_category(id: str, product_category: Product_category_update):
conn.execute(product_categories.update().values(
name=product_category.name,
description=product_category.description,
modified_at= func.now() #ask for this
).where(product_categories.c.id == id))
return conn.execute(product_categories.select().where(product_categories.c.id == id)).first()
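# Illustrative client-side sketch (the host/port and payloads are assumptions, not from the repo):
#   POST   http://localhost:8000/product_categories    {"name": "Drinks", "description": "Cold drinks"}
#   GET    http://localhost:8000/product_categories/1  -> the category created above
#   PUT    http://localhost:8000/product_categories/1  {"name": "Drinks", "description": "Hot and cold drinks"}
#   DELETE http://localhost:8000/product_categories/1  -> 204 No Content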
``` |
{
"source": "1935090/donation",
"score": 2
} |
#### File: donation/app/forms.py
```python
from django import forms
from users.models import CustomUser
class CustomUserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
Confirm_password = forms.CharField(required=False, widget=forms.PasswordInput)
class Meta:
model = CustomUser
fields = ('name', 'email','password')
labels = {
'name': 'Name',
'email':'Email',
'password': 'Password',
'Confirm_password':'<PASSWORD>',
}
def __init__(self, *args, **kwargs):
super(CustomUserForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class UpdateSettingForm(forms.ModelForm):
Current_password = forms.CharField(required=False, widget=forms.PasswordInput(attrs={'placeholder': 'Current Password'}))
New_password = forms.CharField(required=False, widget=forms.PasswordInput(attrs={'placeholder': 'New Password'}))
Confirm_password = forms.CharField(required=False, widget=forms.PasswordInput(attrs={'placeholder': 'Confirm Password'}))
masjidCardNumber = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': '0000 0000 0000 0000'}))
masjidCardName = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': '<NAME>'}))
masjid_name = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': 'Masjid Name'}))
phone= forms.CharField(required=False, widget=forms.TextInput(attrs={'type': 'tel', 'class':'getphone', }))
# profile_pic= forms.CharField(required=False, widget=forms.TextInput(attrs={'type': 'file' }))
class Meta:
model = CustomUser
fields = ('masjid_name','email','profile_pic', "masjidCardNumber", "masjidCardName", "phone")
labels = {
'masjid_name': 'Masjid Name',
'email': 'Email',
'Current_password':'<PASSWORD>',
'New_password':'<PASSWORD>',
'Confirm_password':'<PASSWORD>',
'profile_pic': 'Profile picture',
}
def __init__(self, *args, **kwargs):
super(UpdateSettingForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
class UpdatePasswordForm(forms.ModelForm):
New_password = forms.CharField(required=False, widget=forms.PasswordInput)
Confirm_password = forms.CharField(required=False, widget=forms.PasswordInput)
class Meta:
model = CustomUser
fields = ()
labels = {
}
def __init__(self, *args, **kwargs):
super(UpdatePasswordForm, self).__init__(*args, **kwargs)
self.fields['New_password'].widget.attrs['placeholder'] = 'New password'
self.fields['Confirm_password'].widget.attrs['placeholder'] = 'Confirm password'
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
``` |
{
"source": "1937Elysium/elysium_vision_2020",
"score": 2
} |
#### File: visions/power_cell_visions/power_cell.py
```python
from ovl import *
from elysium_vision_2020.connections import *
yellow = Color([22, 60, 60], [45, 255, 255])
contour_filters = [area_filter(min_area=200), dec_area_sort()]
image_filters = [ovl.gaussian_blur(kernel_size=(5, 5))]
one_power_cell_director = Director(directing_function=center_directions,
target_amount=1,
failed_detection=9999)
one_power_cell = Vision(camera=CAMERA,
threshold=yellow,
contour_filters=contour_filters,
image_filters=image_filters,
connection=ROBOT_NETWORK_TABLES_CONNECTION,
director=one_power_cell_director)
def power_cell_vision(power_cell, send_location):
while True:
image = power_cell.get_image()
contours, _ = power_cell.detect(image)
directions = power_cell.get_directions(contours, image)
ovl.display_contours(image, delay=1, contours=contours)
print("Directions:", directions)
power_cell.send_to_location(directions, send_location)
if __name__ == '__main__':
POWER_CELL_VISION = one_power_cell
power_cell_vision(POWER_CELL_VISION, X_DIRECTION_LOCATION)
``` |
{
"source": "1937Elysium/ovl",
"score": 3
} |
#### File: ovl/detectors/haar_cascade_detector.py
```python
import cv2
import numpy as np
from .detector import Detector
class HaarCascadeDetector(Detector):
"""
A detector used to detect objects using the Haar cascade algorithm
The Detector is initialized from an xml file containing the cascade descriptor
The Detector uses the underlying cv2.CascadeClassifier
For more information, see the OpenCV documentation for cv2.CascadeClassifier
"""
def __init__(self, classifier: str):
self.classifier_source = classifier
self.classifier = cv2.CascadeClassifier(classifier)
def detect(self, image: np.ndarray, *args, **kwargs):
greyscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return self.classifier.detectMultiScale(greyscale)
```
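A hypothetical usage sketch for this detector (the import path, cascade file and image file are assumptions, not part of the library):

```python
import cv2
from ovl.detectors.haar_cascade_detector import HaarCascadeDetector

# Assumes a cascade xml (e.g. OpenCV's frontal-face cascade) and an image are available locally
detector = HaarCascadeDetector("haarcascade_frontalface_default.xml")
image = cv2.imread("people.jpg")

# detect() returns (x, y, w, h) rectangles from cv2.CascadeClassifier.detectMultiScale
for (x, y, w, h) in detector.detect(image):
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("detections", image)
cv2.waitKey(0)
```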
#### File: thresholds/color/color.py
```python
import numpy as np
import copy
import cv2
from typing import *
from ..threshold import Threshold
BaseForColor = NewType("BaseForColor", Union[int, Tuple[Tuple[int, int, int], Tuple[int, int, int]]])
SERIALIZED_COLOR_KEYS = {"high", "low"}
def validate(value, ceiling):
"""
Checks if val is positive and less than the ceiling value.
:param value: The value to be checked (usually an int)
:param ceiling: The highest value "val" can be. (usually an int)
:return: True if val is less than ceiling and not negative
"""
value = int(value)
return 0 <= value < ceiling
def assert_hsv(hsv_point):
"""
Makes sure the hsv point(or vector) given in the parameter has valid values.
:param hsv_point: a 3 length list with 3 ints describing a point the hsv color space
that describe a color limit in a range for a findContour function.
:return: True if valid False if not.
:rtype: bool
"""
return validate(hsv_point[0], 180) and validate(hsv_point[1], 256) and validate(hsv_point[2], 256)
class Color(Threshold):
"""
Color is a Threshold object (an object that turns an image to a binary image
- an image with pixels with the value 1 and 0)
Color object thresholds using 2 HSV color ranges.
Read more about HSV here:
https://www.lifewire.com/what-is-hsv-in-design-1078068
HSV Ranges in OVL (And in the underlying open-cv (cv2)) are:
Hue - 0 to 179 (360/2 so divide any regular HSV value you use by 2)
So if Yellow is 40 - 80 (degrees) in regular HSV palette its 20 to 40 in ovl
Saturation - 0 to 255 (255 is equal to 1 or 100%)
Value - 0 to 255 (255 is equal to 1 or 100%)
2 ranges are passed to the Constructor:
1) Low HSV Range - the lowest acceptable H S V values ([low_H, low_S, low_V])
2) High HSV Range - the highest acceptable H S V values ([high_H, high_S, high_V])
.. code-block:: python
low_range = [15, 100, 100]
high_range = [45, 255, 255]
color = ovl.Color(low_range, high_range)
The color object can the be passed to a Vision object to threshold binary images
Threshold object can be used by themselves using the color.convert() method.
NOTE: Threshold objects automatically convert images to HSV (From the default BGR)
There are multiple built in "battery included" pre-made color object
for instant use in testing and tuning
List of colors:
Red (MultiColorObject) : Red (low) + Red (high)
Red (Low): [0, 100, 100], [8, 255, 255]
Red (High): [172, 100, 100], [179, 255, 255]
Note: in order to find red, use both ranges (low and high) and use the some of both results.
Blue: [105, 100, 100], [135, 255, 255]
Green: [45, 100, 100], [75, 255, 255]
Yellow: [20, 100, 100], [55, 255, 255]
Orange: [10, 100, 100], [18, 255, 255]
Grey: [0, 0, 0], [179, 50, 195]
Black: [0, 0, 0], [179, 255, 30]
White: [0, 0, 200], [179, 20, 255]
Teal: [110, 100, 100], [130, 255, 255]
Purple: [135, 100, 100], [165, 255, 255]
"""
def validate(self, *args, **kwargs):
return assert_hsv(self.low_bound) and assert_hsv(self.high_bound)
def __init__(self, low: BaseForColor, high: BaseForColor):
"""
Constructor for the Color used to turn images to binary images based on
HSV color space (pixels that are in the specified HSV Range)
H - 0 - 179
S - 0 - 255
V - 0 - 255
:param high: high hsv limit of the color
:param low: low hsv limit of the color
"""
if type(low) is tuple:
low = list(low)
if type(low) is int:
low = [low, 100, 100]
if type(high) is tuple:
high = list(high)
if type(high) is int:
high = [high, 255, 255]
self.__low_bound = np.array(low)
self.__high_bound = np.array(high)
def threshold(self, image: np.ndarray) -> np.ndarray:
return cv2.inRange(image, self.low, self.high)
def convert(self, image: np.ndarray) -> np.ndarray:
"""
Converts a given image to hsv and then thresholds and returns the binary mask
:param image: a BGR image
:return: binary mask
:rtype: numpy array
"""
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
return self.threshold(hsv_image)
def __repr__(self):
return 'Color({}, {})'.format(repr(self.low_bound), repr(self.high_bound))
def copy(self):
"""
Duplicates the Color object so that changes do not affect the original color
Useful for modifying BuiltInColors without changing the default
:return: a copy of the color object
"""
return copy.deepcopy(self)
@property
def low_bound(self):
if isinstance(self.__low_bound, np.ndarray):
return self.__low_bound.tolist()
return self.__low_bound
@property
def high_bound(self):
if isinstance(self.__high_bound, np.ndarray):
return self.__high_bound.tolist()
return self.__high_bound
@property
def low(self):
"""
Returns the low hsv limit of the color.
:return: An uint8 numpy array with the low limit of the color.
:rtype: uint8 numpy array
"""
return self.__low_bound
@property
def high(self):
"""
Returns the high hsv limit of the color.
:return: An uint8 numpy array with the high limit of the color.
:rtype: uint8 numpy array
"""
return self.__high_bound
def __str__(self):
return self.__repr__()
```
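A short usage sketch of the Color threshold on its own, outside a Vision pipeline (the image path is an assumption; the yellow range matches the docstring above):

```python
import cv2
from ovl import Color  # assuming the package re-exports Color at the top level

yellow = Color([20, 100, 100], [55, 255, 255])
image = cv2.imread("frame.jpg")   # any BGR image
mask = yellow.convert(image)      # binary mask of pixels inside the yellow HSV range
cv2.imshow("yellow mask", mask)
cv2.waitKey(0)
```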
#### File: ovl/visions/ambient_vision.py
```python
import numpy as np
from typing import List, Tuple
import types
from . import vision
from ..connections import connection
from ..detectors.detector import Detector
class AmbientVision:
"""
A mesh of 2 vision objects that extends one of the visions at a time.
The visions are swapped every main_amount of update_vision calls.
This makes the "ambient_vision" run in the background once every multiple frames.
The example includes only general instructions, 2 vision objects need to be defined to detect something
An example will be as follows:
.. code-block:: python
vision1 = Vision(....)
vision2 = Vision(....)
vision_controller = AmbientVision(main_vision=vision1, ambient_vision=vision2, main_amount=3)
while True:
image = vision_controller.get_image()
contours, image = vision_controller.detect(image)
vision_controller.update_vision()
you can get the current vision object using vision_controller.current_vision
"""
def __init__(self, main_vision: vision.Vision, ambient_vision: vision.Vision,
main_amount: int, start_ambient: bool = False):
self.main_vision = main_vision
self.ambient_vision = ambient_vision
self.main_amount = main_amount
self.is_ambient = start_ambient
if start_ambient:
self.counter = 0
self.current_vision = main_vision
else:
self.counter = main_amount
self.current_vision = ambient_vision
@property
def connection(self) -> connection.Connection:
return self.current_vision.connection
@property
def detector(self) -> Detector:
return self.current_vision.detector
@property
def image_filters(self) -> List[types.FunctionType]:
return self.current_vision.image_filters
@property
def directions(self):
return self.current_vision.director
def get_image(self) -> np.ndarray:
"""
Take a picture using the current vision
See Vision.get_image for more information
"""
return self.current_vision.get_image()
def apply_image_filters(self, image: np.ndarray) -> np.ndarray:
"""
Applies all of the image filter of the current vision on the given image
See Vision.apply_image_filters for more information
"""
return self.current_vision.apply_image_filters(image)
def detect(self, image: np.ndarray, verbose=False, *args, **kwargs) -> Tuple[List[np.ndarray], np.ndarray]:
"""
Gets contours and applies all filters and returns the result,
thus detecting the object according to the specifications in the vision,
Uses the current vision
:param image: image in which the vision should detect an object
:param verbose: if true prints additional information about contour filtering
:return: contours, ratio list (from the filter functions) and the filtered image
"""
return self.current_vision.detect(image, verbose=verbose, *args, **kwargs)
def send(self, data, *args, **kwargs):
"""
Sends a given value to the current vision's connection object
:param data: the data to be sent
:param args: any other parameters to the specific connection object
:param kwargs:
:return: Whatever the underlying connection object returns, usually the parameter data
"""
return self.current_vision.send(data, *args, **kwargs)
def get_directions(self, contours, image, sorter=None):
"""
Returns the directions for the given image and contours using the current vision
:param contours: the final contours detected.
:param image: the image where the contours were detected in.
:param sorter: a sorter function that can be added to be applied before getting directions
:return: the directions
"""
return self.current_vision.get_directions(contours, image, sorter)
def update_vision(self):
"""
Increases the inner counter and swaps the ambient and the main vision
after the set number of updates (self.main_amount)
This is used to switch between the main vision and ambient vision
"""
if self.counter < self.main_amount:
self.counter += 1
self.current_vision = self.main_vision
self.is_ambient = False
else:
self.counter = 0
self.current_vision = self.ambient_vision
self.is_ambient = True
``` |
{
"source": "193s/pup-py",
"score": 2
} |
#### File: pup-py/pup/__init__.py
```python
import json
from subprocess import Popen, PIPE
def raw(text, selectors='', **kwargs):
v = selectors
if 'mode' in kwargs:
v += ' %s{%s}' % (kwargs['mode'], kwargs['attrkey'] if 'attrkey' in kwargs else '')
args = ['pup'] + ([v] if v != '' else [])
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
res = p.communicate(text)
assert res[1] == '', res[1]
return res[0].rstrip('\n')
def pup(text, selectors=''):
return json.loads(raw(text, selectors, mode='json'))
def text(text, selectors=''):
return raw(text, selectors, mode='text')
def attr(text, selectors, attr):
return raw(text, selectors, mode='attr', attrkey=attr)
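# Hypothetical usage sketch (requires the `pup` CLI on the PATH; the HTML snippet is made up):
if __name__ == "__main__":
    html = '<div><a href="https://example.com" class="link">Example</a></div>'
    print(text(html, 'a.link'))          # -> Example
    print(attr(html, 'a.link', 'href'))  # -> https://example.com
    print(pup(html, 'a.link'))           # -> parsed JSON describing the matched nodes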
``` |
{
"source": "1941-1945-mozhempovtorit/Self-playing-pong",
"score": 4
} |
#### File: 1941-1945-mozhempovtorit/Self-playing-pong/ball.py
```python
import pygame
from random import randint, choice
black = (255, 0, 0)  # note: despite the name this is pure red; it is only used as the fill/colorkey to make the background transparent
class Ball(pygame.sprite.Sprite):
def __init__(self, color, center, radius):
# Calling the parent class (pygame.Sprite) constructor
super().__init__()
# Surface to draw on (X and Y pos)
self.image = pygame.Surface((2*radius, 2*radius))
self.image.fill(black)
self.image.set_colorkey(black)
# Drawing a ball
pygame.draw.circle(self.image, color, (radius, radius), radius)
# Randomise ball velocity
self.velocity = [choice((-1, 1)) * randint(4, 5), randint(-6, 6)]
# Bool for collisions with paddles (BUG FIX)
self.isBouncing = False
# Returns a rectangle covering ball's sprite, also puts it in center
self.rect = self.image.get_rect(center=(center[0], center[1]))
def bounce(self): # Bouncing of paddles
if self.velocity[0] > 0:
self.velocity[0] = randint(-8, -5)
else:
self.velocity[0] = randint(5, 8)
self.velocity[1] = randint(-6, 6)
def replay(self, center): # Re-throws a ball
self.rect.x = center[0]
self.rect.y = center[1]
self.velocity = [choice((-1, 1)) * randint(4, 5), randint(-6, 6)]
def run(self): # Ball motion-loop
self.rect.x += self.velocity[0]
self.rect.y += self.velocity[1]
``` |
{
"source": "1941260986/12306",
"score": 3
} |
#### File: 12306/config/ticketConf.py
```python
__author__ = 'MR.wen'
import os
import yaml
def _get_yaml():
"""
Parse the YAML config file
:return: s, the parsed config as a dict
"""
path = os.path.join(os.path.dirname(__file__) + '/ticket_config.yaml')
try: # compatible with both Python 2 and 3
with open(path, encoding="utf-8") as f:
s = yaml.load(f)
except Exception:
with open(path) as f:
s = yaml.load(f)
return s.decode() if isinstance(s, bytes) else s
if __name__ == '__main__':
print(_get_yaml())
``` |
{
"source": "19428861/DeepCube-and-DeepCubePlus",
"score": 3
} |
#### File: code/environments/env_utils.py
```python
import numpy as np
from random import choice
import re
import sys
sys.path.append('./')
def getEnvironment(envName):
envName = envName.lower()
if envName == 'cube3':
from environments.cube_interactive_simple import Cube
Environment = Cube(N=3,moveType="qtm")
elif envName == 'cube3htm':
from environments.cube_interactive_simple import Cube
Environment = Cube(N=3,moveType="htm")
elif envName == 'cube3htmaba':
from environments.cube_interactive_simple import Cube
Environment = Cube(N=3,moveType="htmaba")
elif envName == 'cube4':
from environments.cube_interactive_simple_4 import Cube as Environment
elif envName == 'cube4d2':
from environments.cube4D import Cube
Environment = Cube(2)
elif envName == 'puzzle15':
from environments.puzzleN import PuzzleN
Environment = PuzzleN(4)
elif envName == 'puzzle24':
from environments.puzzleN import PuzzleN
Environment = PuzzleN(5)
elif envName == 'puzzle35':
from environments.puzzleN import PuzzleN
Environment = PuzzleN(6)
elif envName == 'puzzle48':
from environments.puzzleN import PuzzleN
Environment = PuzzleN(7)
elif 'lightsout' in envName:
from environments.LightsOut import LightsOut
m = re.search('lightsout([\d]+)',envName)
Environment = LightsOut(int(m.group(1)))
elif 'hanoi' in envName:
from environments.Hanoi import Hanoi
m = re.search('hanoi([\d]+)d([\d]+)p',envName)
numDisks = int(m.group(1))
numPegs = int(m.group(2))
Environment = Hanoi(numDisks,numPegs)
elif envName == 'sokoban':
from environments.Sokoban import Sokoban
Environment = Sokoban(10,4)
return(Environment)
def generate_envs(Environment,numPuzzles,scrambleRange,probs=None):
assert(scrambleRange[0] > 0)
scrambs = range(scrambleRange[0],scrambleRange[1]+1)
legal = Environment.legalPlays
puzzles = []
puzzles_symm = []
scrambleNums = np.zeros([numPuzzles],dtype=int)
moves = []
for puzzleNum in range(numPuzzles):
startConfig_idx = np.random.randint(0,len(Environment.solvedState_all))
scrambled = Environment.solvedState_all[startConfig_idx]
scrambled_symm = np.stack(Environment.solvedState_all,axis=0)
assert(Environment.checkSolved(scrambled))
# Get scramble Num
scrambleNum = np.random.choice(scrambs,p=probs)
scrambleNums[puzzleNum] = scrambleNum
# Scramble puzzle
while Environment.checkSolved(scrambled): # don't return any solved puzzles
moves_puzzle = []
for i in range(scrambleNum):
move = choice(legal)
scrambled = Environment.next_state(scrambled, move)
scrambled_symm = Environment.next_state(scrambled_symm, move)
moves_puzzle.append(move)
moves_puzzle.append(move)
puzzles.append(scrambled)
puzzles_symm.append(scrambled_symm)
return(puzzles,scrambleNums,moves,puzzles_symm)
```
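An illustrative sketch of how these utilities would be called (assumes running from the `code` directory so that the `environments` package is importable, mirroring the sys.path.append('./') used above):

```python
from environments.env_utils import getEnvironment, generate_envs

env = getEnvironment('puzzle15')
puzzles, scramble_nums, moves, puzzles_symm = generate_envs(env, numPuzzles=10, scrambleRange=[1, 30])
print(len(puzzles), scramble_nums[:5])
```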
#### File: code/environments/Sokoban.py
```python
import numpy as np
from random import choice
import argparse
import matplotlib.pyplot as plt
import os
import time
import cPickle as pickle
import sys
sys.path.append('./')
from ml_utils import search_utils
from ml_utils import nnet_utils
#import gym
#import gym_sokoban
#dir(gym_sokoban)
class Sokoban:
def tf_dtype(self):
import tensorflow as tf
return(tf.uint8)
def __init__(self, dim, numBoxes):
self.dim = dim
self.numBoxes = numBoxes
if self.dim == 10 and self.numBoxes == 4:
self.env_name = 'Sokoban-v1'
self.legalPlays = np.array(['a','s','d','w'])
self.legalPlays_rev = np.array(['d','w','a','s'])
self.dtype = np.uint8
#self.env = gym.make(self.env_name)
numPos = self.dim ** 2
self.wallBegin = 0; self.wallEnd = numPos
self.goalBegin = numPos; self.goalEnd = 2*numPos
self.boxBegin = 2*numPos; self.boxEnd = 3*numPos
self.sokobanBegin = 3*numPos; self.sokobanEnd = 4*numPos
self.numPos = numPos
self.nextStateIdxs = np.zeros((self.numPos,len(self.legalPlays)),dtype=np.int)
for d1 in range(self.dim):
for d2 in range(self.dim):
for moveIdx,move in enumerate(self.legalPlays):
coord = np.array([d1,d2])
coord_next = coord.copy()
if move.lower() == 'a':
coord_next[1] = (coord[1] - 1)*(coord[1] != 0)
elif move.lower() == 'd':
coord_next[1] = np.minimum(coord[1] + 1,self.dim-1)
elif move.lower() == 's':
coord_next[0] = np.minimum(coord[0] + 1,self.dim-1)
elif move.lower() == 'w':
coord_next[0] = (coord[0] - 1)*(coord[0] != 0)
coord_flat = coord[0]*self.dim + coord[1]
coord_flat_next = coord_next[0]*self.dim + coord_next[1]
self.nextStateIdxs[coord_flat,moveIdx] = coord_flat_next
def get_new_game_state(self):
"""
self.env.reset()
room_fixed_flat = self.env.room_fixed.flatten()
room_state_flat = self.env.room_state.flatten()
wall_idxs = 1*(room_fixed_flat == 0)
goal_idxs = 1*(room_fixed_flat == 2)
box_idxs = 1*(room_state_flat == 4)
sokoban_idxs = 1*(room_state_flat == 5)
state = np.concatenate((wall_idxs,goal_idxs,box_idxs,sokoban_idxs),axis=0)
state = np.array([state])
state = state.astype(self.dtype)
"""
statesDir = "environments/sokoban_utils/sokoban_10_10_4/"
stateFiles = [f for f in os.listdir(statesDir) if os.path.isfile(os.path.join(statesDir, f)) and ('.pkl' in f)]
states = pickle.load(open("%s/%s" % (statesDir,choice(stateFiles)),"rb"))
idx = choice(range(states.shape[0]))
state = states[idx:(idx+1),:]
return(state)
def parse_states(self,fileName):
states = []
lines = [line.rstrip('\n') for line in open(fileName)]
rowIdx = -1
state = -np.ones(self.numPos*4)
for line in lines:
if rowIdx >= 0:
startIdx = rowIdx*self.dim
endIdx = (rowIdx+1)*self.dim
state[(self.wallBegin+startIdx):(self.wallBegin+endIdx)] = np.array([x == "#" for x in line])*1
state[(self.goalBegin+startIdx):(self.goalBegin+endIdx)] = np.array([x == "." for x in line])*1
state[(self.boxBegin+startIdx):(self.boxBegin+endIdx)] = np.array([x == "$" for x in line])*1
state[(self.sokobanBegin+startIdx):(self.sokobanBegin+endIdx)] = np.array([x == "@" for x in line])*1
rowIdx = rowIdx + 1
if ";" in line:
rowIdx = 0
state = -np.ones(self.numPos*4)
continue
if rowIdx == self.dim:
assert(sum(state[self.goalBegin:self.goalEnd]) == self.numBoxes)
assert(sum(state[self.boxBegin:self.boxEnd]) == self.numBoxes)
assert(sum(state[self.sokobanBegin:self.sokobanEnd]) == 1)
states.append(state.copy())
rowIdx = -1
states = np.stack(states,axis=0)
states = states.astype(self.dtype)
return(states)
def render(self,states):
wall_idxs = states[:,self.wallBegin:self.wallEnd]
goal_idxs = states[:,self.goalBegin:self.goalEnd]
box_idxs = states[:,self.boxBegin:self.boxEnd]
sokoban_idxs = states[:,self.sokobanBegin:self.sokobanEnd]
states_rendered = np.ones((states.shape[0],self.numPos))
states_rendered[wall_idxs == 1] = 0
states_rendered[goal_idxs == 1] = 2
states_rendered[box_idxs == 1] = 4
states_rendered[sokoban_idxs == 1] = 5
states_rendered[(goal_idxs == 1) & (sokoban_idxs == 1)] = 3
states_rendered[(goal_idxs == 1) & (box_idxs == 1)] = 6
return(states_rendered)
def next_state(self, states_input, move, reverse=False):
moveIdx = np.where(self.legalPlays == np.array(move))[0][0]
outputs = np.atleast_2d(states_input.copy())
numStates = outputs.shape[0]
# Move sokoban
_, sokobanIdxs = np.where(outputs[range(numStates),self.sokobanBegin:self.sokobanEnd] == 1)
sokobanIdxs_next = self.nextStateIdxs[sokobanIdxs,moveIdx].copy()
# Check if hitting a wall
hitWall = outputs[range(numStates),self.wallBegin + sokobanIdxs_next] == 1
sokobanIdxs_next[hitWall] = sokobanIdxs[hitWall]
if reverse:
moveIdx_rev = np.where(self.legalPlays == self.legalPlays_rev[moveIdx])[0][0]
sokobanIdxs_next_rev = self.nextStateIdxs[sokobanIdxs,moveIdx_rev].copy()
# Check if hitting a wall
hitWallOrBox = (outputs[range(numStates),self.wallBegin + sokobanIdxs_next_rev] == 1) | (outputs[range(numStates),self.boxBegin + sokobanIdxs_next_rev] == 1)
sokobanIdxs_next_rev[hitWallOrBox] = sokobanIdxs[hitWallOrBox]
sokobanIdxs_next[hitWallOrBox] = sokobanIdxs[hitWallOrBox]
# Check if box is pushed
boxIdxs_ex, boxIdxs = np.where(outputs[range(numStates),self.boxBegin:self.boxEnd] == 1)
boxIdxs_next = boxIdxs.copy()
box_pushed_idxs = np.where(sokobanIdxs_next[boxIdxs_ex] == boxIdxs)[0]
box_pushed_ex = boxIdxs_ex[box_pushed_idxs]
if box_pushed_idxs.shape[0] > 0:
boxIdxs_pushed = boxIdxs[box_pushed_idxs]
# move any box that has been pushed
if not reverse:
boxIdxs_pushed_next = self.nextStateIdxs[boxIdxs_pushed,moveIdx].copy()
# Check hitting a wall or another box
hitWallOrBox = (outputs[box_pushed_ex,self.wallBegin + boxIdxs_pushed_next] == 1) | (outputs[box_pushed_ex,self.boxBegin + boxIdxs_pushed_next] == 1)
boxIdxs_pushed_next[hitWallOrBox] = boxIdxs_pushed[hitWallOrBox]
# if box has not been pushed then Sokoban does not move
sokobanIdxs_next[box_pushed_ex] = sokobanIdxs_next[box_pushed_ex]*np.invert(hitWallOrBox) + sokobanIdxs[box_pushed_ex]*hitWallOrBox
else:
boxIdxs_pushed_next = self.nextStateIdxs[boxIdxs_pushed,moveIdx_rev].copy()
boxIdxs_next[box_pushed_idxs] = boxIdxs_pushed_next
outputs[range(numStates),self.sokobanBegin + sokobanIdxs] = 0
if not reverse:
outputs[range(numStates),self.sokobanBegin + sokobanIdxs_next] = 1
else:
outputs[range(numStates),self.sokobanBegin + sokobanIdxs_next_rev] = 1
outputs[boxIdxs_ex,self.boxBegin + boxIdxs] = 0
outputs[boxIdxs_ex,self.boxBegin + boxIdxs_next] = 1
return outputs
def checkSolved(self, states):
states = np.atleast_2d(states)
goal_idxs = states[:,self.goalBegin:self.goalEnd]
box_idxs = states[:,self.boxBegin:self.boxEnd]
return np.all(goal_idxs == box_idxs,axis=1)
def getReward(self, states, isSolved = None):
states = np.atleast_2d(states)
reward = np.ones(shape=(states.shape[0]))
return reward
def state_to_nnet_input(self, states, randTransp=False):
states_nnet = np.atleast_2d(states.copy())
#states_nnet = states_nnet.reshape([states_nnet.shape[0],self.N,self.N,1])
return(states_nnet)
def get_pullable_idx(self,states):
### Get box idxs
boxIdxs_ex, boxIdxs = np.where(states[:,self.boxBegin:self.boxEnd] == 1)
boxIdxs = boxIdxs.reshape([states.shape[0],self.numBoxes])
boxPulledIdxs = -np.ones([states.shape[0]],dtype=np.int)
boxPulledIdxs_next = -np.ones([states.shape[0]],dtype=np.int)
sokobanIdxs_next = -np.ones([states.shape[0]],dtype=np.int)
### Move sokoban to a place adjacent to box that it can pull
for stateIdx in range(states.shape[0]):
### Get idxs adjacent to box
for moveIdx in np.random.permutation(len(self.legalPlays)):
boxAdjIdxs = self.nextStateIdxs[boxIdxs[stateIdx],moveIdx]
boxAdjIdxs2 = self.nextStateIdxs[boxAdjIdxs,moveIdx]
boxAdjIdxs12 = np.stack((boxAdjIdxs,boxAdjIdxs2))
canPull = np.all((states[stateIdx,self.wallBegin + boxAdjIdxs12] != 1) & (states[stateIdx,self.boxBegin + boxAdjIdxs12] != 1),axis=0) & (boxAdjIdxs != boxAdjIdxs2)
if max(canPull):
box_chose = np.random.choice(np.where(canPull)[0])
boxPulledIdxs[stateIdx] = boxIdxs[stateIdx][box_chose]
boxPulledIdxs_next[stateIdx] = boxAdjIdxs[box_chose]
sokobanIdxs_next[stateIdx] = boxAdjIdxs2[box_chose]
break
return(boxPulledIdxs,boxPulledIdxs_next,sokobanIdxs_next)
def get_reachable_boxes(self,states,sokobanIdxs):
reachableBoxes = [None]*states.shape[0]
return(reachableBoxes)
def make_solved_state(self,states):
states = np.atleast_2d(states)
states_solved = states.copy()
numStates = states_solved.shape[0]
_, sokobanIdxs = np.where(states_solved[:,self.sokobanBegin:self.sokobanEnd] == 1)
### Set boxes to goal
states_solved[:,self.boxBegin:self.boxEnd] = states_solved[:,self.goalBegin:self.goalEnd]
### Set sokoban idx to pullable idx
_, sokobanIdxs_solved, _ = self.get_pullable_idx(states_solved)
states_solved[range(numStates),self.sokobanBegin + sokobanIdxs] = 0
states_solved[range(numStates),self.sokobanBegin + sokobanIdxs_solved] = 1
return(states_solved)
def pull_box(self,states):
states = np.atleast_2d(states)
states_pulled = states.copy()
numStates = states_pulled.shape[0]
_, sokobanIdxs = np.where(states_pulled[:,self.sokobanBegin:self.sokobanEnd] == 1)
boxPulledIdxs, boxPulledIdxs_next, sokobanIdxs_next = self.get_pullable_idx(states_pulled)
states_pulled[range(numStates),self.sokobanBegin + sokobanIdxs] = 0
states_pulled[range(numStates),self.sokobanBegin + sokobanIdxs_next] = 1
states_pulled[range(numStates),self.boxBegin + boxPulledIdxs] = 0
states_pulled[range(numStates),self.boxBegin + boxPulledIdxs_next] = 1
return(states_pulled)
def generate_envs(self,numStates,scrambleRange,probs=None):
assert(scrambleRange[0] >= 0)
scrambs = range(scrambleRange[0],scrambleRange[1]+1)
states = []
### Load states
statesDir = "environments/sokoban_utils/sokoban_10_10_4/"
stateFiles = [f for f in os.listdir(statesDir) if os.path.isfile(os.path.join(statesDir, f)) and ('.pkl' in f)]
while len(states) != numStates:
numToLoad = numStates-len(states)
stateFile = choice(stateFiles)
states_load = pickle.load(open("%s/%s" % (statesDir,stateFile),"rb"))
load_idxs = np.random.permutation(states_load.shape[0])
load_idxs = load_idxs[:min(numToLoad,len(load_idxs))]
[states.append(states_load[i]) for i in load_idxs]
states = np.stack(states,axis=0)
### Take reverse steps
scrambleNums = np.random.choice(scrambs,size=numStates,replace=True,p=probs)
states = self.make_solved_state(states)
numMoves = np.zeros(numStates)
while (np.max(numMoves < scrambleNums) == True):
poses = np.where((numMoves < scrambleNums))[0]
subsetSize = max(len(poses) // len(self.legalPlays), 1)
poses = np.random.choice(poses,subsetSize)
move = choice(self.legalPlays)
states[poses] = self.next_state(states[poses],move,reverse=True)
numMoves[poses] = numMoves[poses] + 1
states = list(states)
return(states,scrambleNums)
def print_state(self, state):
out = str(np.reshape(state, (self.n, self.n)))
out = out.replace("1", "X")
out = out.replace("0", "O")
out = out.replace("[", " ")
out = out.replace("]", " ")
print(out)
class InteractiveEnv(plt.Axes):
def __init__(self,state,env,heuristicFn=None):
self.state = state
self.env = env
self.heuristicFn = heuristicFn
if self.state is None:
self.state = self.env.get_new_game_state()
super(InteractiveEnv, self).__init__(plt.gcf(),[0,0,1,1])
callbacks = fig.canvas.callbacks.callbacks
del callbacks['key_press_event']
self.figure.canvas.mpl_connect('key_press_event',self._keyPress)
self._updatePlot()
self.move = []
def _keyPress(self, event):
if event.key.upper() in 'ASDW':
self.state = self.env.next_state(self.state,event.key.lower())
self._updatePlot()
if self.env.checkSolved(self.state)[0]:
print("SOLVED!")
elif event.key.upper() in 'R':
self.state = self.env.get_new_game_state()
self._updatePlot()
elif event.key.upper() in 'O':
self.state = self.env.make_solved_state(self.state)
self._updatePlot()
elif event.key.upper() in 'P':
for i in range(1000):
self.state = self.env.next_state(self.state,choice(self.env.legalPlays),reverse=True)
self._updatePlot()
elif event.key.upper() == 'N':
self.stepNnet()
elif event.key.upper() == 'M':
self.solveNnet()
def _updatePlot(self):
self.clear()
renderedIm = self.env.render(self.state)
renderedIm = renderedIm.reshape((self.env.dim,self.env.dim))
self.imshow(renderedIm)
self.figure.canvas.draw()
def stepNnet(self):
search = search_utils.BFS(self.state, self.heuristicFn, self.env)
values, nextStatesValueReward = search.run(1)
nextMoves = np.argmin(nextStatesValueReward,axis=1)
self.state = self.env.next_state(self.state,self.env.legalPlays[nextMoves[0]])
self._updatePlot()
def solveNnet(self):
startTime = time.time()
BestFS_solve = search_utils.BestFS_solve(self.state,self.heuristicFn,self.env,bfs=0)
isSolved, solveSteps, nodesGenerated_num = BestFS_solve.run(numParallel=100,depthPenalty=0.1,verbose=True)
### Make move
moves = solveSteps[0]
print("Neural network found solution of length %i (%s)" % (len(moves),time.time()-startTime))
for move in moves:
self.state = self.env.next_state(self.state,move)
self._updatePlot()
time.sleep(0.5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--heur', type=str, default=None, help="")
args = parser.parse_args()
state = None
env = Sokoban(10,4)
#state = np.expand_dims(env.generate_envs(100, [1000, 1000])[0][0],0)
heuristicFn = None
if args.heur is not None:
heuristicFn = nnet_utils.loadNnet(args.heur,"",False,env)
fig = plt.figure(figsize=(5, 5))
interactiveEnv = InteractiveEnv(state,env,heuristicFn)
fig.add_axes(interactiveEnv)
plt.show()
```
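A minimal driver sketch for the Sokoban environment above. The module name `sokoban` is a placeholder for however this file is imported, and it assumes the pre-generated state files under `environments/sokoban_utils/sokoban_10_10_4/` are present, since `generate_envs` loads its start states from there.
```python
# Hedged usage sketch; the module name and scramble range are illustrative.
import numpy as np
from random import choice
from sokoban import Sokoban

env = Sokoban(10, 4)  # 10x10 board with 4 boxes, matching the __main__ block above
states, scramble_nums = env.generate_envs(5, [10, 20])  # 5 states, 10-20 reverse moves each
state = np.expand_dims(states[0], 0)  # next_state/checkSolved expect a batch dimension

for _ in range(100):  # random walk until solved (or give up after 100 moves)
    if env.checkSolved(state)[0]:
        print("solved")
        break
    state = env.next_state(state, choice(env.legalPlays))
```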
#### File: code/ml_utils/nnet_utils.py
```python
import tensorflow as tf
from tensorflow.contrib import predictor
import sonnet as snt
import numpy as np
import pickle
import os
import re
import sys
sys.path.append('./')
sys.path.append('../')
sys.path.append('../code')
sys.path.append('../data')
sys.path.append('./solvers/cube3/')
from tensorflow_utils import layers # needed for layers.dense / layers.resBlock in model_fn
from multiprocessing import Queue
def nnetPredict(x,inputQueue,predictor,Environment,batchSize=10000,realWorld=True):
stateVals = np.zeros((0,1))
numExamples = x.shape[0]
startIdx = 0
while startIdx < numExamples:
endIdx = min(startIdx + batchSize,numExamples)
x_itr = x[startIdx:endIdx]
if realWorld == True:
states_nnet = Environment.state_to_nnet_input(x_itr)
else:
states_nnet = x_itr
states_nnet = np.expand_dims(states_nnet,1)
inputQueue.put(states_nnet)
numStates = states_nnet.shape[0]
stateVals_batch = np.array([next(predictor)['values'].max() for _ in range(numStates)])
stateVals_batch = np.expand_dims(stateVals_batch,1)
stateVals = np.concatenate((stateVals,stateVals_batch),axis=0)
startIdx = endIdx
assert(stateVals.shape[0] == numExamples)
return(stateVals)
def getEstimatorPredFn(network,inputDim,Environment,batchSize=10000):
inputQueue = Queue(1)
tf_dtype = Environment.tf_dtype()
def inputGen():
while True:
yield inputQueue.get()
def input_fn_test():
ds = tf.data.Dataset.from_generator(inputGen,(tf_dtype),(tf.TensorShape([None,1]+inputDim)))
return(ds)
predictor = network.predict(input_fn_test)
def predFn(x,realWorld=True):
return(nnetPredict(x,inputQueue,predictor,Environment,batchSize,realWorld))
return(predFn)
def nnetPredict_exported(predict_fn,x,Environment,batchSize=10000,realWorld=True):
stateVals = np.zeros((0,1))
numExamples = x.shape[0]
startIdx = 0
while startIdx < numExamples:
endIdx = min(startIdx + batchSize,numExamples)
x_itr = x[startIdx:endIdx]
if realWorld == True:
states_nnet = Environment.state_to_nnet_input(x_itr)
else:
states_nnet = x_itr
states_nnet = np.expand_dims(states_nnet,1)
stateVals_batch = predict_fn({"x": states_nnet})['output']
stateVals = np.concatenate((stateVals,stateVals_batch),axis=0)
startIdx = endIdx
assert(stateVals.shape[0] == numExamples)
return(stateVals)
def loadNnet(modelLoc,modelName,useGPU,Environment,batchSize=10000,gpuNum=None):
assert(modelLoc != "")
argsFile = "%s/args.pkl" % (modelLoc)
exportDir = "%s/exported_model" % (modelLoc)
if os.path.isfile(argsFile) or True:
CONFIG = tf.ConfigProto()
CONFIG.gpu_options.allow_growth = True # Prevents tf from grabbing all gpu memory.
if useGPU and len(os.environ['CUDA_VISIBLE_DEVICES']) > 0:
print('\nRunning from GPU %s' % str(os.environ['CUDA_VISIBLE_DEVICES']))
else:
print('\nRunning from CPU')
config = tf.estimator.RunConfig(session_config=CONFIG)
tf.InteractiveSession(config=CONFIG)
if os.path.isdir(exportDir) or True:
predict_fn = predictor.from_saved_model(export_dir=exportDir)
def nnetFn(x,realWorld=True):
return(nnetPredict_exported(predict_fn,x,Environment,batchSize,realWorld))
else:
args = pickle.load(open(argsFile,"rb"))
nnet_model_fn = lambda features,labels,mode : model_fn(features,labels,mode,args)
network = tf.estimator.Estimator(model_fn=nnet_model_fn, config=config, model_dir=modelLoc)
inputDim = list(Environment.state_to_nnet_input(Environment.solvedState).shape[1:])
nnetFn = getEstimatorPredFn(network,inputDim,Environment,batchSize) # TODO parallel calls to estimator will prob result in errors
return(nnetFn)
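# Illustrative call pattern (the model directory is hypothetical), mirroring how the
# interactive Sokoban display calls this module:
#   heuristicFn = loadNnet("savedModels/sokoban", "", useGPU=False, Environment=env)
#   values = heuristicFn(states)  # -> np.ndarray of shape (len(states), 1)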
def getModelName(args):
if args.nnet_name == "":
labeledDataName = args.labeled_data.split("/")
labeledDataName.remove("")
if len(labeledDataName) == 0:
labeledDataName = ['False']
nnetName = "l%i_resb_%i_h%i_act%s_bs%i_lri%s_lrd%s_momi%s_momf%s_opt%s_l2%s_dop%s_env%s_sr_%i_%i_lab%s" % (args.num_l,args.num_res,args.num_h,args.act_type.upper(),args.batch_size,args.lr_i,args.lr_d,args.mom_i,args.mom_f,args.opt.upper(),args.l2,args.drop_p,args.env.upper(),args.scramb_min,args.scramb_max,labeledDataName[-1])
if args.batch_norm:
nnetName = nnetName + "_bn"
if args.layer_norm:
nnetName = nnetName + "_ln"
if args.weight_norm:
nnetName = nnetName + "_wn"
if args.angle_norm:
nnetName = nnetName + "_an"
else:
nnetName = args.nnet_name
if args.debug != 0:
nnetName = "%s_debug" % (nnetName)
return(nnetName)
def addNnetArgs(parser):
parser.add_argument('--env', type=str, default='cube3', help="Environment: cube3, cube4, puzzle15, puzzle24")
parser.add_argument('--solve_itrs', type=int, default=5000, help="How often to test")
# Architecture
parser.add_argument('--num_l', type=int, default=1, help="Number of hidden layers")
parser.add_argument('--num_res', type=int, default=1, help="Number of residual blocks")
parser.add_argument('--num_h', type=int, default=100, help="Number of hidden neurons")
parser.add_argument('--act_type', type=str, default="relu", help="Type of activation function")
parser.add_argument('--skip_depth', type=int, default=0, help="How far back to concatenate previous layers")
parser.add_argument('--maxout_bs', type=int, default=2, help="Maxout block size")
parser.add_argument('--debug', action='store_true', default=False, help="Debug mode (appends _debug to the nnet name)")
parser.add_argument('--batch_norm', action='store_true', default=False, help="Add if doing batch normalization")
parser.add_argument('--layer_norm', action='store_true', default=False, help="Add if doing layer normalization")
parser.add_argument('--weight_norm', action='store_true', default=False, help="Add if doing weight normalization")
parser.add_argument('--angle_norm', action='store_true', default=False, help="Add if doing angle normalization")
# Gradient descent
parser.add_argument('--max_itrs', type=int, default=5000000, help="Maximum number of iterations")
parser.add_argument('--lr_i', type=float, default=0.001, help="Initial learning rate")
parser.add_argument('--lr_d', type=float, default=0.9999993, help="Learning rate decay")
parser.add_argument('--mom_i', type=float, default=0.0, help="Initial momentum")
parser.add_argument('--mom_f', type=float, default=0.0, help="Final momentum")
parser.add_argument('--mom_c_s', type=int, default=500000, help="Momentum change steps")
parser.add_argument('--batch_size', type=int, default=100, help="Batch size")
parser.add_argument('--opt', type=str, default="adam", help="Optimization method: sgd or adam")
parser.add_argument('--bm', type=float, default=0.9, help="b1 for adam")
parser.add_argument('--bv', type=float, default=0.999, help="b2 for adam")
# Regularization
parser.add_argument('--drop_p', type=float, default=0.0, help="Probability of a neuron being dropped out")
parser.add_argument('--l2', type=float, default=0.0, help="L2 for weight regularization")
# Input/Output Format
parser.add_argument('--in_type', type=str, default="fc", help="Type of input")
parser.add_argument('--out_type', type=str, default="linear", help="Type of output")
# Problem difficulty
parser.add_argument('--scramb_min', type=int, default=1, help="Minimum number of scrambles to train on")
parser.add_argument('--scramb_max', type=int, default=20, help="Maximum number of scrambles to train on")
parser.add_argument('--max_turns', type=int, default=None, help="Maximum number of turns when solving")
# Labeled data set
parser.add_argument('--labeled_data', type=str, required=True, help="File for labeled data")
parser.add_argument('--nnet_name', type=str, default="", help="Replace nnet name with this name, if exists")
parser.add_argument('--save_dir', type=str, default="savedModels", help="Directory to which to save model")
parser.add_argument('--delete_labeled', action='store_true', default=False, help="Deletes labeled file after opening")
parser.add_argument('--model_num', type=int, default=0, help="Model number for progressive learning")
parser.add_argument('--eps', type=float, default=None, help="Training stops if test set falls below specified error for supervised training. Default is none, meaning no early stopping due to this argument.")
return(parser)
def statesToStatesList(states,env):
if env == 'cube3':
#oneHot_idx = tf.one_hot(states,24,on_value=1,off_value=0)
oneHot_idx = tf.one_hot(states,6,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif env == 'puzzle15':
oneHot_idx = tf.one_hot(states,4*4,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif env == 'puzzle24':
oneHot_idx = tf.one_hot(states,5*5,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif env == 'puzzle35':
oneHot_idx = tf.one_hot(states,6*6,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif env == 'puzzle48':
oneHot_idx = tf.one_hot(states,7*7,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif 'lightsout' in env:
outRep = states
elif 'hanoi' in env:
m = re.search('hanoi([\d]+)d([\d]+)p',env)
numPegs = int(m.group(2))
oneHot_idx = tf.one_hot(states,numPegs,on_value=1,off_value=0)
outRep = tf.reshape(oneHot_idx,[-1,int(oneHot_idx.shape[1])*int(oneHot_idx.shape[2])])
elif 'sokoban' in env:
outRep = states
return(tf.cast(outRep,tf.float32))
def model_fn(features,labels,mode,args):
if type(features) == type(dict()):
states = features["x"][:,0,:]
else:
states = features[:,0,:]
if mode == tf.estimator.ModeKeys.TRAIN:
isTraining = True
else:
isTraining = False
### Process states
statesProcessed = statesToStatesList(states,args.env)
print("Processed shape: {}".format(statesProcessed.shape[1]))
dropoutSonnet = lambda x: tf.nn.dropout(x,keep_prob=1-args.drop_p)
nnet_layers = []
doBatchNorm = args.batch_norm
layerNorm = args.layer_norm
weightNorm = args.weight_norm
angleNorm = args.angle_norm
nnet_layers.append(lambda x: layers.dense(x,5000,args.act_type,isTraining,doBatchNorm,args.l2,weightNorm,layerNorm,angleNorm))
for layerIdx in range(0,args.num_l):
nnet_layers.append(lambda x: layers.dense(x,args.num_h,args.act_type,isTraining,doBatchNorm,args.l2,weightNorm,layerNorm,angleNorm))
for layerIdx in range(0,args.num_res):
nnet_layers.append(lambda x: layers.resBlock(x,args.num_h,args.act_type,2,isTraining,doBatchNorm,args.l2,weightNorm,layerNorm,angleNorm))
nnet_layers.append(dropoutSonnet)
nnet_layers.append(lambda x: layers.dense(x,1,"linear",isTraining,False,args.l2,False,False,False))
nnet = snt.Sequential(nnet_layers)
### Get curr state value
stateVals_nnet = nnet(statesProcessed)
global_step = tf.train.get_global_step()
predictions = {"values": stateVals_nnet}
if mode == tf.estimator.ModeKeys.PREDICT:
return(tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs={"y":tf.estimator.export.PredictOutput(stateVals_nnet)}))
### Get state value target
stateVals_dp = tf.cast(labels,tf.float32)
### Cost
stateVals_dp = tf.stop_gradient(stateVals_dp)
errs = stateVals_dp - stateVals_nnet
cost = tf.reduce_mean(tf.pow(errs,2),name="cost")
if mode == tf.estimator.ModeKeys.EVAL:
return(tf.estimator.EstimatorSpec(mode, loss=cost))
### Tests
scrambleRange = [args.scramb_min,args.scramb_max]
scrambleTests = range(scrambleRange[0],scrambleRange[1]+1)
if args.scramb_max - args.scramb_min > 30:
scrambleTests = np.linspace(args.scramb_min,args.scramb_max,30,dtype=np.int)
for scramb in scrambleTests:
err_val = tf.gather(errs,tf.where(tf.equal(tf.floor(stateVals_dp[:,0]),scramb))[:,0])
tf.summary.scalar('Cost_%i' % (scramb), tf.reduce_mean(tf.pow(err_val,2)))
tf.summary.scalar('Cost', cost)
tf.summary.scalar('batch_size', tf.shape(states)[0])
### Optimization
graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_regularization_loss = tf.reduce_sum(graph_regularizers)
cost = cost + total_regularization_loss
if mode == tf.estimator.ModeKeys.TRAIN:
learningRate = tf.train.exponential_decay(args.lr_i, global_step, 1, args.lr_d, staircase=False)
momentum = args.mom_i + (args.mom_f-args.mom_i)*tf.minimum(tf.to_float(global_step)/float(args.mom_c_s),1.0)
tf.summary.scalar('lr', learningRate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if args.opt.upper() == "SGD":
opt = tf.train.MomentumOptimizer(learningRate,momentum).minimize(cost,global_step)
elif args.opt.upper() == "ADAM":
opt = tf.train.AdamOptimizer(learningRate,args.bm,args.bv).minimize(cost,global_step)
return(tf.estimator.EstimatorSpec(mode, predictions=predictions, loss=tf.reduce_mean(tf.pow(errs,2)), train_op=opt))
def getNextStates(cubes,Environment):
legalMoves = Environment.legalPlays
nextStates_cube = np.empty([len(cubes),len(legalMoves)] + list(cubes[0].shape),dtype=Environment.dtype)
nextStateRewards = np.empty([len(cubes),len(legalMoves)])
nextStateSolved = np.empty([len(cubes),len(legalMoves)],dtype=bool)
### Get next state rewards and if solved
if type(cubes) == type(list()):
cubes = np.stack(cubes,axis=0)
for moveIdx,move in enumerate(legalMoves):
nextStates_cube_move = Environment.next_state(cubes, move)
isSolved = Environment.checkSolved(nextStates_cube_move)
nextStateSolved[:,moveIdx] = isSolved
nextStateRewards[:,moveIdx] = Environment.getReward(nextStates_cube_move,isSolved)
if type(move[0]) == type(list()):
nextStateRewards[:,moveIdx] = nextStateRewards[:,moveIdx] - (len(move) - 1)
nextStates_cube[:,moveIdx,:] = nextStates_cube_move
return(nextStates_cube,nextStateRewards,nextStateSolved)
``` |
{
"source": "1942Spectre/image_data_augmenter",
"score": 4
} |
#### File: 1942Spectre/image_data_augmenter/image_toolkit.py
```python
from PIL import Image
import numpy as np
def resize(img,size):
## Resize the given image to the given size without cropping
img = img.resize(size)
return img
def horizontal_flip(img):
## Flip the image horizontally
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def rotate(img , degrees):
## Rotate the image by the given number of degrees; negative degrees rotate clockwise
img = img.rotate(degrees)
return img
def grayscale(img):
return img.convert(mode="L")
``` |
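A short usage sketch for the helpers above. The module name `image_toolkit` matches the file name; the input and output file names are made up.
```python
# Hedged example; "sample.jpg" and the output names are placeholders.
from PIL import Image
import image_toolkit as itk

img = Image.open("sample.jpg")
augmented = [
    itk.resize(img, (224, 224)),  # stretch to 224x224 without cropping
    itk.horizontal_flip(img),
    itk.rotate(img, 15),          # 15 degrees counter-clockwise (negative rotates clockwise)
    itk.grayscale(img),           # single-channel "L" copy
]
for i, out in enumerate(augmented):
    out.save(f"augmented_{i}.png")
```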
{
"source": "1951FDG/vectordrawableresdir2colorresfile",
"score": 3
} |
#### File: 1951FDG/vectordrawableresdir2colorresfile/rgbcsv2colordict.py
```python
from __future__ import print_function
import csv
import os.path
import sys
import time
import timeit
# Begin timer:
start_time = timeit.default_timer()
def program(*args):
# do whatever
pass
# Provide default file_in name argument if not provided:
if __name__ == "__main__":
# def main(argv):
try:
file_in = sys.argv[1]
except IndexError: # getopt.GetoptError:
print("Usage: " + sys.argv[0] + " -i <inputfile> -o <outputfile>")
sys.exit(2)
# Exit if file_in not found:
if os.path.exists(file_in) and os.access(file_in, os.R_OK):
with open(file_in, newline="") as f:
reader = csv.reader(f, delimiter=",")
for i in reader:
header_rows = (
"# "
+ time.strftime("%Y-%m-%d %H:%M (local time)")
+ " "
+ sys.argv[0]
+ " START: rowcount="
+ str(sum(1 for _ in f))
+ "."
)
print(header_rows)
else:
print(
"# "
+ time.strftime("%Y-%m-%d %H:%M (local time)")
+ " "
+ sys.argv[0]
+ " ABORTED. Either file "
+ file_in
+ " is missing or is not readable."
)
sys.exit()
# Provide default file_out name argument if not provided:
if __name__ == "__main__":
# def main(argv):
try:
file_out = sys.argv[2]
except IndexError: # getopt.GetoptError:
# Name output file by appending .txt to the name:
file_out = sys.argv[0] + ".txt"
# Send STDOUT to a file:
stdout = sys.stdout # remember the handle to the real standard output.
sys.stdout = open(file_out, "w")
# Print in black format:
with open(file_in, newline="") as in_f:
reader = csv.DictReader(in_f, delimiter=",")
print("HEX_TO_NAMES = {")
rownum = 0
for i in reader:
print(" " + '"' + i["_Hex"] + '": "' + i["_Title"] + '"' + ",")
rownum = rownum + 1
print("}")
footer_rows = (
"# "
+ time.strftime("%Y-%m-%d")
+ " "
+ os.path.basename(sys.argv[0])
+ " "
+ os.path.basename(file_out)
+ " output "
+ str(rownum)
+ " rows."
)
print(footer_rows, end="") # no NewLine
# Close the file every time:
sys.stdout.close()
sys.stdout = stdout # Restore regular stdout.
# End timer:
elapsed = timeit.default_timer() - start_time
print(
"# "
+ time.strftime("%Y-%m-%d %H:%M (local time)")
+ " "
+ sys.argv[0]
+ " END: ran for "
+ "{:.2f}".format(elapsed * 1000)
+ " secs."
)
print(footer_rows)
``` |
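A sketch of the expected input and output for the script above, assuming a CSV with `_Title` and `_Hex` columns; the file names and the `python` command are illustrative.
```python
# Hedged round-trip example; colors.csv / colors_dict.txt are made-up names.
import csv
import subprocess

with open("colors.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["_Title", "_Hex"])
    writer.writeheader()
    writer.writerow({"_Title": "Crimson", "_Hex": "#DC143C"})
    writer.writerow({"_Title": "Teal", "_Hex": "#008080"})

subprocess.run(["python", "rgbcsv2colordict.py", "colors.csv", "colors_dict.txt"], check=True)

# colors_dict.txt should now contain roughly:
# HEX_TO_NAMES = {
#     "#DC143C": "Crimson",
#     "#008080": "Teal",
# }
# ...followed by a one-line "# ... output 2 rows." footer comment.
```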
{
"source": "19521242bao/SE104",
"score": 2
} |
#### File: apps/result/views.py
```python
from collections import Counter
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, HttpResponseRedirect, redirect
from django.views.generic import ListView
from apps.corecode.models import AcademicSession, AcademicTerm,StudentClass
from apps.students.models import Student
from .models import Result
from .forms import CreateResults, EditResults,CreateResultCLass,GetResutlSubjectForm
@login_required
def create_result(request):
students = Student.objects.all()
if request.method == 'POST':
#after visiting the second page
if 'finish' in request.POST:
form = CreateResults(request.POST)
if form.is_valid():
subjects = form.cleaned_data['subjects']
session = form.cleaned_data['session']
term = form.cleaned_data['term']
students = request.POST['students']
results = []
for student in students.split(','):
stu = Student.objects.get(pk=student)
if stu.current_class:
for subject in subjects:
check = Result.objects.filter(session=session, term=term,current_class=stu.current_class,subject=subject, student=stu).first()
if not check:
results.append(
Result(
session=session,
term=term,
current_class=stu.current_class,
subject=subject,
student=stu
)
)
Result.objects.bulk_create(results)
return redirect('edit-results')
#after choosing students
id_list = request.POST.getlist('students')
if id_list:
form = CreateResults(initial={"session": request.current_session, "term":request.current_term})
studentlist = ','.join(id_list)
return render(request, 'result/create_result_page2.html', {"students": studentlist, "form": form, "count":len(id_list)})
else:
messages.warning(request, "You didnt select any student.")
return render(request, 'result/create_result.html', {"students": students})
@login_required
def add_score(request):
class_list=StudentClass.objects.all()
if request.method == 'POST':
if 'finish' in request.POST:
form = EditResults(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Results successfully updated')
return redirect('view-results')
else:
if "current_class" in request.POST:
class_name = request.POST['current_class']
results = Result.objects.filter(
session=request.current_session, term=request.current_term,current_class=class_name)
form = EditResults(queryset=results)
return render(request, 'result/edit_results2.html', {"formset": form})
class_id=request.POST.getlist('current_class')
print(class_id)
if class_id:
form=CreateResultCLass(initial={"session": request.current_session, "term":request.current_term})
class_select=','.join(class_id)
return render(request, 'result/edit_results2.html',
{"current_class": class_select, "form": form, "count": len(class_id)})
else:
messages.warning(request, "You didnt select any class.")
return render(request, 'result/class_list.html', {"class_list": class_list})
def edit_results(request):
if request.method == 'POST':
form = EditResults(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Results successfully updated')
return redirect('edit-results')
else:
results = Result.objects.filter(
session=request.current_session, term=request.current_term)
form = EditResults(queryset=results)
return render(request, 'result/edit_results.html', {"formset": form})
@login_required
def all_results_view_class(request):
results = Result.objects.filter(
session=request.current_session, term=request.current_term)
bulk = {}
for result in results:
test_total = 0
exam_total = 0
subjects = []
for subject in results:
if subject.student == result.student:
subjects.append(subject)
subject.test_score = float(subject.test_score)
subject.exam_score = float(subject.exam_score)
test_total = (test_total + subject.test_score)
exam_total = (exam_total + subject.exam_score)
test_total = test_total / len(subjects)
exam_total = exam_total / len(subjects)
bulk[result.student.id] = {
"student": result.student,
"subjects": subjects,
"test_total": test_total,
"exam_total": exam_total,
"total_total": round((test_total + exam_total) / 2, 2)
}
context = {
"results": bulk
}
return render(request, 'result/all_results.html', context)
def score_grade(score):
if score <= 10 and score >= 8:
return 'Giỏi'
elif score < 8 and score >= 6.5:
return 'Khá'
elif score < 6.5 and score >= 5:
return 'Trung Bình'
elif score >= 0 and score < 5:
return 'Không Đạt'
else:
return "Invalid Score"
@login_required
def all_results_view(request):
results = Result.objects.filter(
session=request.current_session, term=request.current_term)
bulk = {}
def find_student(arr, target):
for i in range(1, len(arr)):
if arr[i][0] == target:
return i
return -1
grade = []
t = len(results)
classlist = [] # names of the classes
grading_class = [["", 0, 0, 0, 0]] # [class name, A, B, C, D]
std = [["example", 0, 0, "A", "class"]] # [student name, average score, cnt, grading, class]
for result in results:
test_class = 0
exam_class = 0
total_average = 0
total_total = 0
class_member = []
if result.current_class not in classlist:
classlist.append(result.current_class)
grading_class.append([classlist[-1], 0, 0, 0, 0])
for student in results:
grade.append(result.current_class)
if student.current_class == result.current_class:
class_member.append(student.student)
if find_student(std, student.student) == -1 or len(std) == 1:
std.append([student.student, 0, 0, "", student.current_class])
exam_class += student.exam_score
test_class += student.test_score
total_total = (student.exam_score + student.test_score) / 2
position_student_in_std = find_student(std, student.student)
std[position_student_in_std][1] += total_total
std[position_student_in_std][2] += 1
if std[position_student_in_std][2] == 2:
std[position_student_in_std][2] = 1
std[position_student_in_std][1] /= 2
for i in range(1, len(std)):
std[i][3] = score_grade(std[i][1])
for j in range(1, len(grading_class)):
if std[i][-1] == grading_class[j][0]:
grading_class[j][2] += 1
if std[i][3] == "Giỏi":
grading_class[j][1] += 1
if std[i][3] == "Khá":
grading_class[j][1] += 1
if std[i][3] == "Trung Bình":
grading_class[j][1] += 1
x = len(std)
for i in range(1, len(grading_class)):
if grading_class[i][2] == 0:
percent=0
else:
percent=int((grading_class[i][1]/(grading_class[i][2]))*100)
bulk[grading_class[i][0]] = {
"name_class": grading_class[i][0],
"term": request.current_term,
"percent": percent,
"good": grading_class[i][1],
"SL":grading_class[i][2]
}
context = {
"results": bulk
}
return render(request, 'result/all_results_class.html', context)
def all_result_view_subject(request):
bulk = {}
if request.method == 'POST':
form = GetResutlSubjectForm(request.POST)
if form.is_valid():
subjects = form.cleaned_data['subjects']
term=form.cleaned_data['term']
session=form.cleaned_data['session']
results=Result.objects.filter(term=term,session=session,subject=subjects)
list_class=list(results.values_list('current_class', flat=True).distinct())
name_class=""
for id_class in list_class:
print(id_class)
number_class=0
good_member=0
for result in results:
if result.current_class.id==id_class:
name_class=result.current_class
number_class+=1
score_student=(result.total_score())
if score_student>=5:
good_member+=1
print(subjects)
bulk[id_class] = {
"name_subject":subjects,
"name_class": name_class,
"term": request.current_term,
"percent": int(good_member/number_class*100),
"good": good_member,
"SL":number_class
}
print(bulk)
context = {
"results": bulk,
"form":form
}
return render(request, 'result/result_subject.html', context)
form = GetResutlSubjectForm(initial={"session": request.current_session, "term": request.current_term})
return render(request, 'result/result_subject.html', {"form": form})
```
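A minimal URLconf sketch for wiring the views above. The route paths and most URL names are assumptions; only `edit-results` and `view-results` are taken from the `redirect()` calls in the code.
```python
# apps/result/urls.py -- hypothetical layout; adjust paths and names to the real project.
from django.urls import path
from . import views

urlpatterns = [
    path("create/", views.create_result, name="create-result"),
    path("add-score/", views.add_score, name="add-score"),
    path("edit/", views.edit_results, name="edit-results"),
    path("all/", views.all_results_view_class, name="view-results"),
    path("by-class/", views.all_results_view, name="view-results-class"),
    path("by-subject/", views.all_result_view_subject, name="view-results-subject"),
]
```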
#### File: apps/students/models.py
```python
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.core.validators import RegexValidator, EmailValidator
from apps.corecode.models import StudentClass, Subject
class Student(models.Model):
GENDER = [
('nam', 'Nam'),
('nữ', 'Nữ')
]
AGE=[
('15',"15"),
('16',"16"),
('17',"17"),
('18',"18"),
('19',"19"),
('20',"20"),
]
name = models.CharField(max_length=200,verbose_name= "họ tên")
gender = models.CharField(max_length=10, choices=GENDER, default='nam', verbose_name="giới tính")
date_of_birth = models.DateField(default=timezone.now,verbose_name="ngày sinh")
#age=models.IntegerField(default=15,choices=AGE,verbose_name="tuổi")
current_class = models.ForeignKey(StudentClass, on_delete=models.SET_NULL, blank=True, null=True,verbose_name="Lớp")
#date_of_admission = models.DateField(default=timezone.now,verbose_name="ngày nhập học")
mobile_num_regex = RegexValidator(regex="^[0-9]{10,15}$", message="Entered mobile number isn't in the right format!")
parent_mobile_number = models.CharField(validators=[mobile_num_regex], max_length=13, blank=True,verbose_name="số điện thoại")
email_regex=EmailValidator(message="Email is not valid")
email=models.CharField(validators=[email_regex],max_length=30,default=None)
address = models.TextField(blank=True,verbose_name="địa chỉ")
#others = models.TextField(blank=True,verbose_name="thông tin thêm")
passport = models.ImageField(blank=True, upload_to='students/passports/',verbose_name="ảnh đại diện")
class Meta:
ordering = ["name"]
def __str__(self):
return f'{self.name}'
def get_absolute_url(self):
return reverse('student-detail', kwargs={'pk': self.pk})
class StudentBulkUpload(models.Model):
date_uploaded = models.DateTimeField(auto_now=True,verbose_name="ngày cập nhật")
csv_file = models.FileField(upload_to='students/bulkupload/')
``` |
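A small ORM usage sketch for the model above. The field values are illustrative, and `student-detail` must be defined in the project's URLconf for `get_absolute_url` to resolve.
```python
# Hedged example; run inside `python manage.py shell` of this project.
from apps.students.models import Student

student = Student.objects.create(
    name="Nguyen Van A",
    gender="nam",
    parent_mobile_number="0901234567",
    email="vana@example.com",
)
print(student)                     # __str__ returns the student's name
print(student.get_absolute_url())  # reverse('student-detail', kwargs={'pk': ...})

unassigned = Student.objects.filter(current_class__isnull=True).order_by("name")
```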
{
"source": "1961C-GT/BaseCode",
"score": 3
} |
#### File: 1961C-GT/BaseCode/eng_display.py
```python
import math
import sys
import time
import tkinter as tk
from tkinter import font
from tkinter import *
from multiprocessing import Process, Pipe
from eng_colors import EngColors
from main import Main
import config
class EngDisplay:
def __init__(self, src=None, use_light_theme=False, repeat_log=False):
self.colors = EngColors(use_dark=(not use_light_theme))
self.parent_conn, self.child_conn = Pipe()
self.parent_conn_serial_out, self.child_conn_serial_out = Pipe()
self.data_src = src
self.main = Main(src, multi_pipe=self.child_conn, serial_pipe=self.child_conn_serial_out, repeat_log=repeat_log)
if src is None:
self.using_serial = True
else:
self.using_serial = False
self.proc = Process(target=self.main.run)
self.proc.start()
# self.width = 700
# self.height = 700
self.move_amt = 20
self.meas_to_map = 1 / 1000
self.universal_scale = 2
self.start_pos = []
self.measuring = False
self.cur_line = None
self.cur_line_txt = None
self.closed = False
self.popup_active = False
self.cycle_counter = 0
self.paused = True
self.not_cleared = True
self.tk_version = tk.TclVersion
self.create_eng_display()
def create_eng_display(self):
self.window = tk.Tk()
self.myframe = Frame(self.window)
self.myframe.pack(fill=BOTH, expand=YES)
w, h = self.window.winfo_screenwidth(), self.window.winfo_screenheight()
self.canvas = ResizingCanvas(self.myframe, width=w, height=h, borderwidth=0, bg=self.colors.background,
highlightthickness=0)
self.window.wm_title(f"MNSLAC Engineering Display")
self.canvas.pack(fill=BOTH, expand=YES)
self.zoom(3)
# Add menu
# self.menu_bar = Menu(self.window)
# self.file_menu = Menu(self.menu_bar, tearoff=0)
# self.file_menu.add_command(label="Exit", command=self.window.quit, accelerator="Cmd+q")
# self.menu_bar.add_cascade(label="File", menu=self.file_menu)
# self.help_menu = Menu(self.menu_bar, tearoff=0)
# self.menu_bar.add_cascade(label="Help", menu=self.help_menu)
# self.window.config(menu=self.menu_bar)
# Bind these functions to motion, press, and release
self.canvas.bind('<Motion>', self.measure)
self.canvas.bind('<Button-1>', self.start_measure)
self.canvas.bind('<Button-3>', lambda e: self.zoom(0.9))
self.canvas.bind('<Button-2>', lambda e: self.zoom(0.9))
self.window.bind('<Up>', lambda e: self.canvas.move_by(0, self.move_amt))
self.window.bind('<Down>', lambda e: self.canvas.move_by(0, -self.move_amt))
self.window.bind('<Left>', lambda e: self.canvas.move_by(self.move_amt, 0))
self.window.bind('<Right>', lambda e: self.canvas.move_by(-self.move_amt, 0))
self.window.bind('<Shift-Up>', lambda e: self.canvas.move_by(0, self.move_amt * 5))
self.window.bind('<Shift-Down>', lambda e: self.canvas.move_by(0, -self.move_amt * 5))
self.window.bind('<Shift-Left>', lambda e: self.canvas.move_by(self.move_amt * 5, 0))
self.window.bind('<Shift-Right>', lambda e: self.canvas.move_by(-self.move_amt * 5, 0))
self.canvas.bind('<ButtonRelease-1>', self.stop_measure)
self.window.protocol("WM_DELETE_WINDOW", self.close_callback)
# self.canvas.addtag_all("bg")
# Create crosshairs in background
scale = 25 * 1000 # Distance in mm between crosshairs
number = 30 # Total number of crosshairs to be drawn
min_val = -scale * number
max_val = scale * number
length = scale * number * self.universal_scale * self.meas_to_map
for x in range(min_val, max_val, scale):
x_tmp = x * self.universal_scale * self.meas_to_map + self.canvas.x_pos
self.canvas.create_line(int(x_tmp), int(length), int(x_tmp), int(-length), fill=self.colors.crosshair,
tags="obj-bg", dash=(3, 10)) # 1D1F25
for y in range(min_val, max_val, scale):
y_tmp = y * self.universal_scale * self.meas_to_map + self.canvas.y_pos
self.canvas.create_line(int(length), int(y_tmp), int(-length), int(y_tmp), fill=self.colors.crosshair,
tags="obj-bg", dash=(3, 10))
# Create the MNSLAC icon
mnslac_icon = PhotoImage(file=self.colors.mnslac_logo)
self.icon = self.canvas.create_image(w - 10, 10, image=mnslac_icon, anchor=NE)
# Create the hz update text
self.update_hz = self.canvas.create_text(10, 10, text="0.0 Hz", fill=self.colors.text,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_large), anchor=tk.NW)
# Create the no_connection rectangle and text
self.no_connection_rect = self.canvas.create_rectangle(-300, h / 3 - 50, 300 + w, h / 3 + 50,
fill=self.colors.background_accent, tag="del")
self.no_connection = self.canvas.create_text(w / 2, h / 3, text="NO CONNECTION", fill=self.colors.text_warn,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_xlarge),
anchor=tk.CENTER, tag="del")
# Create the connection details text based on whether or not we are using a serial connection or log file
if self.using_serial is False:
self.no_connection_details = self.canvas.create_text(w / 2, h / 3 + 20,
text="Begin playback of log file by clicking 'Play'",
fill=self.colors.text_details,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_medium),
anchor=tk.CENTER, tag="del")
msg = f"Loaded from '{self.data_src}'"
else:
self.no_connection_details = self.canvas.create_text(w / 2, h / 3 + 20,
text="Connect NODE and/or ensure proper serial configuration",
fill=self.colors.text_details,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_medium),
anchor=tk.CENTER, tag="del")
serial_port = config.SERIAL_PORT
saving_log = self.main.log_file_name
if saving_log is None:
msg = f"Listening to Serial '{serial_port}', no logging'"
else:
msg = f"Listening to Serial '{serial_port}', logging to '{saving_log}'"
self.file_details = self.canvas.create_text(10, h - 10, text=f"{msg}", fill=self.colors.text,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_small), anchor=tk.SW)
# Initialize the main canvas
if self.main.kill or not self.update_frame():
print('Error initializing the main canvas')
return
# Create the details canvas
self.dh = 370
self.dw = 350
self.details = Canvas(self.window, width=self.dw, height=self.dh, bg=self.colors.background_accent,
highlightthickness=0, borderwidth=0)
self.details.create_rectangle(2, 2, self.dw - 4, self.dh - 4, fill="", outline=self.colors.pinstripe,
dash=(1, 5))
self.d_title = self.details.create_text(20, 20, text="Node Details:", fill=self.colors.text_details,
font=font.Font(family='Courier New', size=14), anchor=tk.NW)
# Create the list for node details based on the node_list from main
self.node_details_list = {}
self.connection_list = {}
y_pos = 50
for node_id, node_obj in self.main.node_list.items():
if node_obj.is_base:
continue
# Create the text objects for later use
self.node_details_list[node_id] = {
'txt_id': self.details.create_text(20, y_pos, text=node_obj.name, fill=self.colors.text_details,
font=font.Font(family='Courier New', size=12), anchor=tk.NW),
'name': node_obj.name,
'bat': 0,
'temp': 0,
'heading': 0,
'speed': 0
}
y_pos += 20
# Create the node ranging section
y_pos += 20
self.details.create_text(20, y_pos, text="Ranging List", fill=self.colors.text_details,
font=font.Font(family='Courier New', size=14), anchor=tk.NW)
y_pos += 30
x_pos = 20
y_pos_o = y_pos
rows_per_col = 5
row_counter = 0
for name in self.main.name_arr:
id1, id2 = name.split('-')
n1 = self.main.node_list[id1].name
n2 = self.main.node_list[id2].name
n1 = n1[0] + n1.split()[1]
n2 = n2[0] + n2.split()[1]
self.connection_list[name] = {
"counter": 0,
"node_obj1": self.main.node_list[id1],
"node_obj2": self.main.node_list[id2],
"name": n1 + "↔" + n2,
"txt_id": self.details.create_text(x_pos, y_pos, text=n1 + "↔" + n2 + ": 0",
fill=self.colors.text_details,
font=font.Font(family='Courier New', size=12), anchor=tk.NW)
}
y_pos += 20
row_counter += 1
if row_counter == rows_per_col:
row_counter = 0
x_pos += 100
y_pos = y_pos_o
# Skip below the ranging section
y_pos = y_pos_o + 20 * rows_per_col + 30
x_pos = 75
# Create the section containing buttons, based on whether or not the data source is a file or serial connection
if self.using_serial:
# The data source is a serial connection, create the sleep and reset buttons
button2 = Button(self.window, command=lambda: self.popup_msg(type="sleep"), text="Sleep (Seconds)",
width=13, anchor=tk.CENTER, highlightbackground=self.colors.background_accent, bd=0,
highlightthickness=0, relief=tk.FLAT)
button2_window = self.details.create_window(x_pos, y_pos, anchor=tk.CENTER, window=button2)
x_pos += 74
y_pos -= 15
self.sleep_time_entry = Entry(self.window, highlightbackground=self.colors.background_accent,
bg=self.colors.entry_background, bd=2)
e1_window = self.details.create_window(x_pos, y_pos, anchor=tk.NW, window=self.sleep_time_entry)
y_pos += 44
x_pos -= 75
button1 = Button(self.window, command=lambda: self.popup_msg(type="reset"), text="Reset Network", width=13,
anchor=tk.CENTER, highlightbackground=self.colors.background_accent, bd=0,
highlightthickness=0, relief=tk.FLAT)
button1_window = self.details.create_window(x_pos, y_pos, anchor=tk.CENTER, window=button1)
else:
# The data source is a file, create the play/pause button and playback speed slider
y_pos += 10
self.play_pause_button_string = tk.StringVar()
self.pause_play_button = Button(self.window, command=self.play_pause,
textvariable=self.play_pause_button_string, width=13, anchor=tk.CENTER,
highlightbackground=self.colors.background_accent, bd=0,
highlightthickness=0, relief=tk.FLAT)
self.play_pause_button_string.set("Play")
button2_window = self.details.create_window(x_pos, y_pos, anchor=tk.CENTER, window=self.pause_play_button)
x_pos += 75
y_pos -= 1
self.update_hz_limit_scale = Scale(self.window, from_=0, to=1, resolution=0.001,
command=self.update_refresh_hz, orient=HORIZONTAL,
troughcolor=self.colors.slider_trough, borderwidth=0, length=175,
width=15, relief=tk.FLAT, activebackground="#AAA", sliderrelief=tk.FLAT,
showvalue=False, fg="#FFF")
self.update_hz_limit_scale.set(1)
w_window = self.details.create_window(x_pos, y_pos, anchor=tk.W, window=self.update_hz_limit_scale)
self.details.create_text(x_pos, y_pos - 15, text="Slow", fill=self.colors.text_details,
font=font.Font(family='Courier New', size=10), anchor=tk.W)
self.details.create_text(x_pos + 175, y_pos - 15, text="Fast", fill=self.colors.text_details,
font=font.Font(family='Courier New', size=10), anchor=tk.E)
self.update_hz_target_text = self.details.create_text(x_pos + 88, y_pos + 18, text="≈60Hz",
fill=self.colors.text_details,
font=font.Font(family='Courier New', size=10),
anchor=tk.CENTER)
self.canvas.assign_resize_callback(self.resize_event)
# self.proc.terminate()
self.main_loop()
def popup_msg(self, type="reset"):
if self.popup_active:
return
valid = False
message = ""
target = None
if type == "reset":
valid = True
message = "Are you sure you want to reset?"
target = self.reset_network
elif type == "sleep":
try:
self.sleep_time = int(self.sleep_time_entry.get())
self.sleep_time_entry.config(fg=self.colors.text)
message = "Sleep for {} seconds OK?".format(int(self.sleep_time))
target = self.sleep_network
valid = True
except:
self.sleep_time_entry.config(fg=self.colors.text_error)
print('Invalid value for sleep time: {}'.format(self.sleep_time_entry.get()))
else:
print('Unknown pop message type: {}'.format(type))
return
if valid:
ph = 80
pw = 270
self.popup = Canvas(self.window, width=pw, height=ph, bg=self.colors.background_dialog,
highlightthickness=0, borderwidth=0)
w, h = self.window.winfo_screenwidth(), self.window.winfo_screenheight()
self.popup.place(x=w / 2 - pw / 2, y=h / 3 - ph / 2)
self.popup.create_rectangle(1, 1, pw - 3, ph - 3, fill="", outline="#888", dash=(1, 5))
self.popup.create_rectangle(0, 0, pw, 20, fill=self.colors.background_accent, outline="")
self.popup.create_polygon([-1, ph, 4, ph, -1, ph - 5], outline='', fill=self.colors.background, width=0)
self.popup.create_polygon([pw + 1, ph, pw - 4, ph, pw + 1, ph - 5], outline='', fill=self.colors.background,
width=0)
self.popup.create_polygon([-1, -1, 4, -1, -1, 5], outline='', fill=self.colors.background, width=0)
self.popup.create_polygon([pw + 1, -1, pw - 4, -1, pw + 1, 5], outline='', fill=self.colors.background,
width=0)
self.popup.create_text(pw / 2, 11, text="NOTICE", fill=self.colors.text_notice,
font=font.Font(family='Courier New', size=14), anchor=tk.CENTER)
self.popup.create_text(pw / 2, 35, text=message, fill=self.colors.text_invert,
font=font.Font(family='Helvetica', size=14), anchor=tk.CENTER)
yes_button = Button(self.window, command=target, text="Yes", width=13, anchor=tk.CENTER,
highlightbackground=self.colors.background_dialog, bd=0, highlightthickness=0,
relief=tk.FLAT)
yes_button_window = self.popup.create_window(70, ph - 20, anchor=tk.CENTER, window=yes_button)
no_button = Button(self.window, command=self.destroy_popup, text="No", width=13, anchor=tk.CENTER,
highlightbackground=self.colors.background_dialog, bd=0, highlightthickness=0,
relief=tk.FLAT)
no_button_window = self.popup.create_window(pw - 70, ph - 20, anchor=tk.CENTER, window=no_button)
self.popup_active = True
self.ph = ph
self.pw = pw
def destroy_popup(self):
if self.popup_active:
self.popup.destroy()
self.popup_active = False
def reset_network(self):
self.destroy_popup()
self.parent_conn_serial_out.send({
'cmd': 'reset'
})
def sleep_network(self):
self.destroy_popup()
self.parent_conn_serial_out.send({
'cmd': 'sleep',
'time': self.sleep_time
})
def play_pause(self):
print('Play Pause!')
if self.using_serial is False:
self.paused = not self.paused
if self.paused:
self.parent_conn_serial_out.send({
'cmd': 'pause'
})
# self.details.itemconfig(self.pause_play_button, text="Play")
self.play_pause_button_string.set("Play")
else:
self.parent_conn_serial_out.send({
'cmd': 'play'
})
# self.details.itemconfig(self.pause_play_button, text="Pause")
self.play_pause_button_string.set("Pause")
# 0 -> 1
# 0.5 -> 5
# 1 -> 60
def update_refresh_hz(self, value):
try:
refresh_hz = float(value)
if refresh_hz <= 0.5:
refresh_hz = refresh_hz * 8 + 1
else:
refresh_hz = (refresh_hz - 0.5) * 110 + 5
if refresh_hz == 60:
refresh_hz = 0 # Sets unlimited hz
self.details.itemconfig(self.update_hz_target_text, text="∞".format(float(refresh_hz)),
font=font.Font(family=self.colors.data_font, size=22))
else:
self.details.itemconfig(self.update_hz_target_text, text="≈{:.2f}Hz".format(float(refresh_hz)),
font=font.Font(family=self.colors.data_font, size=10))
self.parent_conn_serial_out.send({
'cmd': 'set_speed',
'speed': refresh_hz
})
except Exception as e:
pass
def resize_event(self, event):
self.details.place(x=event.width - (self.dw + 25), y=event.height - (self.dh + 25))
if self.popup_active is True:
self.popup.place(x=event.width / 2 - self.pw / 2, y=event.height / 3 - self.ph / 2)
coords = self.canvas.coords(self.icon)
self.canvas.move(self.icon, (event.width - 10) - coords[0], 0)
coords = self.canvas.coords(self.file_details)
self.canvas.move(self.file_details, 0, (event.height - 10) - coords[1])
if self.not_cleared:
coords = self.canvas.coords(self.no_connection)
self.canvas.move(self.no_connection, event.width / 2 - coords[0], event.height / 3 - coords[1])
coords = self.canvas.coords(self.no_connection_details)
self.canvas.move(self.no_connection_details, event.width / 2 - coords[0], event.height / 3 + 20 - coords[1])
coords = self.canvas.coords(self.no_connection_details)
self.canvas.move(self.no_connection_details, event.width / 2 - coords[0], event.height / 3 + 20 - coords[1])
coords = self.canvas.coords(self.no_connection_rect)
self.canvas.move(self.no_connection_rect, -300 - coords[0], event.height / 3 - 40 - coords[1])
def close_callback(self):
self.window.destroy()
self.closed = True
print('Window Closed!')
self.proc.terminate()
def main_loop(self):
frame_end = False
receiving = True
last_update = 0
message_timer = 0
try:
while True:
if time.time() - message_timer > 0.5:
message_timer = time.time()
self.draw_update_hz(int(self.cycle_counter / 0.5))
self.cycle_counter = 0
if frame_end is True:
if not self.update_frame():
return
last_update = time.time()
# self.clear_canvas()
frame_end = False
elif time.time() - last_update > 0.03333333333:
if not self.update_frame():
return
last_update = time.time()
while receiving is True:
if self.parent_conn.poll():
msg = self.parent_conn.recv()
if type(msg) == dict and "cmd" in msg:
if "args" not in msg:
continue
if msg['cmd'] == "frame_start":
frame_end = False
self.clear_canvas()
self.cycle_counter += 1
elif msg['cmd'] == "frame_end":
frame_end = True
break
elif msg['cmd'] == "clear_screen":
self.clear_canvas()
elif msg['cmd'] == "draw_circle":
self.draw_circle(msg['args'])
elif msg['cmd'] == "connect_points":
self.connect_points(msg['args'])
elif msg['cmd'] == "status_update":
self.status_update(msg['args'])
elif msg['cmd'] == "report_communication":
self.report_communication(msg['args'])
elif msg['cmd'] == "clear_connection_list":
self.clear_connection_list(msg['args'])
else:
print(f"Unknown command: {msg['cmd']}")
else:
print(msg)
else:
receiving = False
receiving = True
except tk.TclError:
print('Close detected. Exit!')
exit()
# Interactive features
def draw_update_hz(self, hz_value):
txt = "{} Hz".format(hz_value)
self.canvas.itemconfig(self.update_hz, text=txt)
def update_frame(self):
try:
self.window.update_idletasks()
self.window.update()
except:
return False
return True
def measure(self, event):
# txt = "Coordinates: ({}, {}) meters".format(round(x), round(y))
# self.details.itemconfig(self.d_mouse, text=txt)
# Check to see if we are measuring
if self.measuring:
# Try to remove the old elements
try:
event.widget.delete(self.cur_line)
event.widget.delete(self.cur_line_txt)
except:
pass
(x, y) = self.translate_screen_pos_to_canvas_pos(event.x, event.y)
x = x + self.canvas.x_pos
y = y + self.canvas.y_pos
# Calculate the rotation between the two points
rotation = 180 - math.degrees(math.atan2(self.start_pos[1] - y,
self.start_pos[0] - x))
# Normalize the rotation
if 90 < rotation < 270:
rotation -= 180
# Convert to radians
rrotation = math.radians(rotation)
# Calculate mid point + rotation offset
midx = (self.start_pos[0] + x) / 2 - math.sin(rrotation) * 10
midy = (self.start_pos[1] + y) / 2 - math.cos(rrotation) * 10
# Calculate distance
dist_num = math.sqrt(
(self.start_pos[0] - x) ** 2 + (self.start_pos[1] - y) ** 2) / self.universal_scale
# Calculate distance string
dist = '{:.0f}m'.format(dist_num)
# Create the text
self.cur_line_txt = event.widget.create_text(midx, midy, text=dist,
fill=self.colors.text,
font=font.Font(family=self.colors.data_font,
size=self.colors.text_size_large),
justify=tk.LEFT, angle=rotation)
# Create the line
self.cur_line = event.widget.create_line(self.start_pos[0], self.start_pos[1], x,
y, fill=self.colors.measure_line_color, dash=self.colors.measure_line_dash, arrow=tk.BOTH, width=self.colors.measure_line_width)
def shrink(self, scale, x=None, y=None):
if x is None or y is None:
x = self.window.winfo_pointerx() - self.window.winfo_rootx()
y = self.window.winfo_pointery() - self.window.winfo_rooty()
# (x, y) = self.translate_screen_pos_to_canvas_pos(0, 0)
# x = x + self.canvas.x_pos
# y = y + self.canvas.y_pos
# print(x, y)
# x = 0
# y = 0
# self.canvas.scale("obj", x, y, scale, scale)
# self.canvas.scale("obj-bg", x, y, scale, scale)
old_scale = self.universal_scale
self.universal_scale *= scale
self.canvas.scale("obj", self.canvas.x_pos, self.canvas.y_pos, scale, scale)
self.canvas.scale("obj-bg", self.canvas.x_pos, self.canvas.y_pos, scale, scale)
def translate_screen_pos_to_canvas_pos(self, x, y):
return x - self.canvas.x_pos - self.canvas.x_offset, y - self.canvas.y_pos - self.canvas.y_offset
def translate_canvas_pos_to_screen_pos(self, x, y):
return x + self.canvas.x_pos + self.canvas.x_offset, y + self.canvas.y_pos + self.canvas.y_offset
def start_measure(self, event):
# Save the initial point
(x, y) = self.translate_screen_pos_to_canvas_pos(event.x, event.y)
x = x + self.canvas.x_pos
y = y + self.canvas.y_pos
self.start_pos = (x, y)
# Set measuring to True
self.measuring = True
def zoom(self, scale, center=False):
if center is False:
self.shrink(scale)
else:
self.shrink(scale, x=0, y=0)
def stop_measure(self, event):
# Include globals
# Set measuring to False
self.measuring = False
now_pos = self.translate_screen_pos_to_canvas_pos(event.x, event.y)
now_pos = (now_pos[0] + self.canvas.x_pos, now_pos[1] + self.canvas.y_pos)
if self.start_pos[0] == now_pos[0] and self.start_pos[1] == now_pos[1]:
self.zoom(1.1)
# Try to remove the old elements
try:
event.widget.delete(self.cur_line)
event.widget.delete(self.cur_line_txt)
except:
pass
# Helper Functions
def clear_canvas(self):
self.canvas.delete("obj")
self.canvas.delete("del")
self.not_cleared = False
@staticmethod
def get_val_from_args(args, val):
if val in args:
return args[val]
else:
return None
def clear_connection_list(self, args):
for key in self.connection_list.keys():
self.connection_list[key]['counter'] = 0
def report_communication(self, args):
key = self.get_val_from_args(args, "key")
if key is None:
print(f"Invalid args input for function 'report_communication': {args}")
return
if key in self.connection_list:
self.connection_list[key]['counter'] += 1
txt = "{}: {:<5}".format(self.connection_list[key]['name'], self.connection_list[key]['counter'])
self.details.itemconfig(self.connection_list[key]['txt_id'], text=txt)
else:
print(f"Nodes '{key}' not in the comm list. Command 'report_communication'.")
def status_update(self, args):
node_id = self.get_val_from_args(args, "node_id")
bat = self.get_val_from_args(args, "bat")
temp = self.get_val_from_args(args, "temp")
heading = self.get_val_from_args(args, "heading")
if node_id is None or bat is None or temp is None:
print(f"Invalid args input for function 'status_update': {args}")
return
if node_id not in self.node_details_list:
if node_id != '0' and node_id != '1':
print(f"Node '{node_id}' not in the details list. Command 'status_update'.")
return
txt = "{:<7}| BAT {:<4}, TEMP {:<5}, HDG {}".format(self.node_details_list[node_id]['name'],
str(round(bat)) + "%", str(round(temp)) + "°C",
str(round(heading)) + "°")
self.details.itemconfig(self.node_details_list[node_id]['txt_id'], text=txt)
def draw_circle(self, args):
x = self.get_val_from_args(args, "x")
y = self.get_val_from_args(args, "y")
r = self.get_val_from_args(args, "r")
fill = self.get_val_from_args(args, "fill")
tags = self.get_val_from_args(args, "tags")
outline = self.get_val_from_args(args, "outline")
width = self.get_val_from_args(args, "width")
text = self.get_val_from_args(args, "text")
text_color = self.get_val_from_args(args, "text_color")
text_size = self.get_val_from_args(args, "text_size")
text_y_bias = self.get_val_from_args(args, "text_y_bias")
if x is None or y is None or r is None:
print(f"Invalid args input for function 'draw_circle': {args}")
return
x = x * self.universal_scale
y = y * self.universal_scale
r = r * self.universal_scale
(x, y) = self.translate_screen_pos_to_canvas_pos(x, y)
if fill is None:
fill = 'text'
if tags is None:
tags = []
if outline is None:
outline = 'blank'
if width is None:
width = 3
x = x * self.meas_to_map
y = y * self.meas_to_map
r = r * self.meas_to_map
self.create_circle(x, y, r, extra_tags=tags, fill=fill, width=width, outline=outline)
if text is not None:
if text_color is None:
text_color = "text"
if text_size is None:
text_size = "text_size_large"
if text_y_bias is None:
ypos = y - r - 20
if ypos < 0:
ypos = y + r + 20
else:
ypos = text_y_bias
self.create_text(x, ypos, text=text, color=text_color, size=text_size)
def connect_points(self, args):
pos1 = self.get_val_from_args(args, "pos1")
pos2 = self.get_val_from_args(args, "pos2")
dashed = self.get_val_from_args(args, "dashed")
color = self.get_val_from_args(args, "color")
text = self.get_val_from_args(args, "text")
text_size = self.get_val_from_args(args, "text_size")
text_color = self.get_val_from_args(args, "text_color")
arrow = self.get_val_from_args(args, "arrow")
if pos1 is None or pos2 is None:
print(f"Invalid args input for function 'connect_points': {args}")
return
if dashed is None:
dashed = True
if arrow is "both":
arrow = tk.BOTH
else:
arrow = None
pos1_scaled = (pos1[0] * self.meas_to_map * self.universal_scale + self.canvas.x_pos,
pos1[1] * self.meas_to_map * self.universal_scale + self.canvas.y_pos)
pos2_scaled = (pos2[0] * self.meas_to_map * self.universal_scale + self.canvas.x_pos,
pos2[1] * self.meas_to_map * self.universal_scale + self.canvas.y_pos)
self._connect_points(pos1_scaled, pos2_scaled, text=text, text_size=text_size, text_color=text_color,
dashed=dashed, color=color, arrow=arrow)
def create_circle(self, x, y, r, extra_tags=[], fill=None, outline=None, **kwargs):
fill = self.refrence_color(fill, default=self.colors.text)
outline = self.refrence_color(outline, default=self.colors.blank)
(x, y) = self.translate_canvas_pos_to_screen_pos(x, y)
tags = ["obj"]
return self.canvas.create_oval(x - r, y - r, x + r, y + r, tags=(tags + extra_tags), fill=fill, outline=outline,
**kwargs)
def create_text(self, x, y, text="", color=None, size=None, extra_tags=[], **kwargs):
size = self.refrence_color(size, self.colors.text_size_large)
color = self.refrence_color(color, default=self.colors.text)
(x, y) = self.translate_canvas_pos_to_screen_pos(x, y)
tags = ["obj"]
self.canvas.create_text(x, y, text=text,
fill=color, font=font.Font(family=self.colors.data_font, size=size),
justify=tk.LEFT, tags=(tags + extra_tags))
def _connect_points(self, node1_pos, node2_pos, text=None, text_size=None, text_color=None, dashed=True,
color="#3c4048", arrow=None):
if node2_pos[0] is None or node2_pos[1] is None or node1_pos[0] is None or node1_pos[1] is None:
return
if text is not None:
# Calculate the rotation between the two points
rotation = 180 - math.degrees(math.atan2(node1_pos[1] - node2_pos[1],
node1_pos[0] - node2_pos[0]))
            # Keep the label upright: flip the rotation if it would render upside down
if 90 < rotation < 270:
rotation -= 180
# Convert to radians
rrotation = math.radians(rotation)
# Calculate mid point + rotation offset
midx = (node1_pos[0] + node2_pos[0]) / 2 - math.sin(rrotation) * 5
midy = (node1_pos[1] + node2_pos[1]) / 2 - math.cos(rrotation) * 5
text_size = self.refrence_color(text_size, self.colors.text_size_large)
text_color = self.refrence_color(text_color, default=self.colors.text)
if self.tk_version >= 8.6:
self.canvas.create_text(midx, midy, text=text,
fill=text_color, font=font.Font(family=self.colors.data_font, size=text_size),
justify=tk.LEFT, tags=['scale', 'obj'], angle=rotation)
else:
self.canvas.create_text(midx, midy, text=text,
fill=text_color, font=font.Font(family=self.colors.data_font, size=text_size),
justify=tk.LEFT, tags=['scale', 'obj'])
color = self.refrence_color(color, default=self.colors.main_line)
if dashed is True:
self.canvas.create_line(node1_pos[0], node1_pos[1], node2_pos[0], node2_pos[1],
width=self.colors.line_width, fill=color, dash=self.colors.dash_type, arrow=arrow,
tags="obj")
else:
self.canvas.create_line(node1_pos[0], node1_pos[1], node2_pos[0], node2_pos[1],
width=self.colors.line_width, fill=color, arrow=arrow, tags="obj")
    def refrence_color(self, color, default=None):
        # Resolve a named color/size attribute on self.colors, falling back to the default.
        if color == default and default is not None:
            return color
        if color is not None and isinstance(color, str) and hasattr(self.colors, color):
            return getattr(self.colors, color)
        return default if default is not None else self.colors.text
class ResizingCanvas(Canvas):
def __init__(self, parent, **kwargs):
Canvas.__init__(self, parent, **kwargs)
self.bind("<Configure>", self.on_resize)
self.height = self.winfo_reqheight()
self.width = self.winfo_reqwidth()
self.scan_x = 0
self.scan_y = 0
self.x_pos = 0
self.y_pos = 0
self.x_offset = 0
self.y_offset = 0
self.move_by(self.width / 4, 100)
self.callback = None
self.last_resize = None
def on_resize(self, event):
# determine the ratio of old width/height to new width/height
wscale = float(event.width) / self.width
hscale = float(event.height) / self.height
old_width = self.width
old_height = self.height
self.width = event.width
self.height = event.height
# resize the canvas
self.scale("bg", 0, 0, wscale, hscale)
self.canvas_shift(0, 0)
if self.callback is not None:
self.callback(event)
self.last_resize = event
def assign_resize_callback(self, callback):
self.callback = callback
if self.last_resize is not None:
self.callback(self.last_resize)
def canvas_shift(self, x, y):
self.move_by(x, y)
def move_by(self, x, y):
self.x_pos = self.x_pos + x
self.y_pos = self.y_pos + y
self.move('obj', x, y)
self.move('obj-bg', x, y)
def main():
if len(sys.argv) > 1:
first = True
use_light_theme = False
repeat_log = False
src = None
for arg in sys.argv:
if first:
first = False
continue
if arg == '-l' or arg == '-light':
use_light_theme = True
elif arg == '-r' or arg == '-repeat':
repeat_log = True
elif src is None:
src = arg
else:
print(f"Unknown command entry: {arg}")
                sys.exit(1)
if src is not None:
EngDisplay(src=src, use_light_theme=use_light_theme, repeat_log=repeat_log)
else:
EngDisplay(use_light_theme=use_light_theme, repeat_log=repeat_log)
else:
EngDisplay()
if __name__ == "__main__":
main()
```
#### File: 1961C-GT/BaseCode/meas_history.py
```python
import numpy
import config
class MeasHistory:
MAX_MEAS = 20
MIN_DIST = 750 # mm
MAX_DIST = 500000 # mm
def __init__(self, key, max_meas=config.MAX_HISTORY, min_vals=5):
self.key = key
self.node1, self.node2 = self.key.split('-')
self.meas_list = []
self.added_meas = False
self.volatile_cycle = True
self.max_meas = max_meas
self.min_vals = min_vals
def get_key(self):
return self.key
def get_node_1(self):
return self.node1
def get_node_2(self):
return self.node2
def new_cycle(self):
# pass
if self.added_meas is False and self.volatile_cycle:
self.add_measurement(0, override=True)
self.added_meas = False
self.volatile_cycle = not self.volatile_cycle
def add_measurement(self, dist, override=False):
if not override and (dist < MeasHistory.MIN_DIST or dist > MeasHistory.MAX_DIST):
return
self.added_meas = True
self.meas_list.append(dist)
if len(self.meas_list) > self.max_meas:
self.meas_list.pop(0)
def get_avg(self):
sum_val = 0
counter = 0
for dist in self.meas_list:
sum_val += dist
if dist != 0:
counter += 1
if counter < self.min_vals:
return 0 # TODO: Remove when we do deviation?
return sum_val / counter
def get_std_deviation(self):
return numpy.std(self.meas_list)
```
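
A minimal usage sketch (not part of the repository) may help show how `MeasHistory` is meant to be driven: measurements for a node pair are appended during each ranging cycle, `new_cycle()` is called between cycles so that silent cycles are padded with zeros, and the average only becomes meaningful once `min_vals` non-zero samples exist. It assumes the file above is importable as `meas_history` and that a `config` module providing `MAX_HISTORY` is on the path.
```python
from meas_history import MeasHistory  # assumes a config module with MAX_HISTORY is importable

hist = MeasHistory("3-5", max_meas=10, min_vals=3)
for dist_mm in (1210, 1195, 1230, 1205):        # values inside MIN_DIST/MAX_DIST
    hist.add_measurement(dist_mm)
hist.new_cycle()                                # bookkeeping between ranging cycles
print(hist.get_node_1(), hist.get_node_2())     # "3" "5"
print(hist.get_avg(), hist.get_std_deviation())
```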
#### File: BaseCode/simulator/task.py
```python
import math
from node import Node
class Task:
def __init__(self, node, vx, vy, time):
self.node = node
self.vx = vx
self.vy = vy
self.time = time
def poll(self, time):
if (time >= self.time):
self.node.setvx(self.vx)
self.node.setvy(self.vy)
return True
else:
return False
``` |
{
"source": "196693/rlcard",
"score": 3
} |
#### File: rlcard/examples/leduc_holdem_random_multi_process.py
```python
import rlcard
from rlcard.agents import RandomAgent
from rlcard.utils import set_global_seed
def main():
# Make environment
env = rlcard.make('leduc-holdem', config={'seed': 0, 'env_num': 4})
iterations = 1
# Set a global seed
set_global_seed(0)
# Set up agents
agent = RandomAgent(action_num=env.action_num)
env.set_agents([agent, agent])
for it in range(iterations):
# Generate data from the environment
trajectories, payoffs = env.run(is_training=False)
# Print out the trajectories
print('\nIteration {}'.format(it))
for ts in trajectories[0]:
print('State: {}, Action: {}, Reward: {}, Next State: {}, Done: {}'.format(ts[0], ts[1], ts[2], ts[3], ts[4]))
if __name__ == '__main__':
main()
```
#### File: 196693/rlcard/four_dqn_save.py
```python
import shutil
import tensorflow as tf
import os
import time
import rlcard
from rlcard.agents import DQNAgent
from rlcard.agents import RandomAgent
from rlcard.utils import set_global_seed, tournament
from rlcard.utils import Logger
# Make environment
env = rlcard.make('mahjong', config={'seed': 0})
eval_env = rlcard.make('mahjong', config={'seed': 0})
# Set the iterations numbers and how frequently we evaluate the performance
evaluate_every = 3000
evaluate_num = 1000
episode_num = 100000
# The initial memory size
memory_init_size = 1000
# Train the agent every X steps
train_every = 1
# The paths for saving the logs and learning curves
log_dir = f'./experiments/new_dqn_4/mahjong_dqn_4_result_{time.time()}/'
path_prefix = './'
save_dir_pre = f'{path_prefix}models/mahjong_dqn4_'
save_dir_main = f'{save_dir_pre}main/'
save_dir_last = f'{save_dir_pre}last/'
# Set a global seed
set_global_seed(0)
def save_model(sess, saver):
# Save model
# save_dir = f'{save_dir_last}/model'
save_dir2 = f'{save_dir_main}/model.ckpt'
if os.path.exists(save_dir_last):
shutil.rmtree(save_dir_last)
# os.makedirs(save_dir)
if os.path.exists(save_dir_main):
shutil.copytree(save_dir_main, save_dir_last)
shutil.rmtree(save_dir_main)
os.makedirs(save_dir_main)
# os.makedirs(save_dir2)
saver.save(sess, save_dir2)
def load_sess(sess, saver):
sl = os.path.exists(save_dir_last)
sm = os.path.exists(save_dir_main)
if not sl and not sm:
pass
elif sl and not sm:
module_file = tf.train.latest_checkpoint(save_dir_last)
saver.restore(sess, module_file)
else:
module_file = tf.train.latest_checkpoint(save_dir_main)
saver.restore(sess, module_file)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# Initialize a global step
global_step = tf.Variable(0, name='global_step', trainable=False)
# Set up the agents
agents = []
for i in range(4):
agent = DQNAgent(sess,
scope=f'dqn_{i}',
action_num=env.action_num,
replay_memory_size=20000,
replay_memory_init_size=memory_init_size,
train_every=train_every,
state_shape=env.state_shape,
mlp_layers=[512, 512])
agents.append(agent)
random_agent = RandomAgent(action_num=eval_env.action_num)
env.set_agents(agents)
eval_env.set_agents([agents[0], random_agent, random_agent, random_agent])
# Initialize global variables
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
load_sess(sess, saver)
# Init a Logger to plot the learning curve
logger = Logger(log_dir)
start_time = time.time()
logger.log(f'Begin at {time.strftime("%Y-%m-%d %H:%M:%S")}')
for episode in range(episode_num):
# Generate data from the environment
trajectories, _ = env.run(is_training=True)
# Feed transitions into agent memory, and train the agent
for i in range(4):
for ts in trajectories[i]:
                agents[i].feed(ts)
# Evaluate the performance. Play with random agents.
if episode % evaluate_every == 0:
logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
save_model(sess, saver)
logger.log(f'The episode is : {episode}')
            logger.log(f'Training time: {time.time() - start_time}')
logger.log(f'End at {time.strftime("%Y-%m-%d %H:%M:%S")}')
# Close files in the logger
logger.close_files()
# Plot the learning curve
logger.plot('DQN')
save_model(sess, saver)
```
#### File: rlcard/models/gin_rummy_rule_models.py
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rlcard.core import Card
from typing import List
import numpy as np
import rlcard
from rlcard.models.model import Model
from rlcard.games.gin_rummy.utils.action_event import *
import rlcard.games.gin_rummy.utils.melding as melding
import rlcard.games.gin_rummy.utils.utils as utils
class GinRummyNoviceRuleAgent(object):
'''
Agent always discards highest deadwood value card
'''
def __init__(self):
self.use_raw = False # FIXME: should this be True ?
@staticmethod
def step(state):
''' Predict the action given the current state.
Novice strategy:
Case where can gin:
Choose one of the gin actions.
Case where can knock:
Choose one of the knock actions.
Case where can discard:
Gin if can. Knock if can.
Otherwise, put aside cards in some best meld cluster.
Choose one of the remaining cards with highest deadwood value.
Discard that card.
Case otherwise:
Choose a random action.
Args:
            state (numpy.array): a numpy array that represents the current state
Returns:
action (int): the action predicted
'''
legal_actions = state['legal_actions']
actions = legal_actions.copy()
legal_action_events = [ActionEvent.decode_action(x) for x in legal_actions]
gin_action_events = [x for x in legal_action_events if isinstance(x, GinAction)]
knock_action_events = [x for x in legal_action_events if isinstance(x, KnockAction)]
discard_action_events = [x for x in legal_action_events if isinstance(x, DiscardAction)]
if gin_action_events:
actions = [x.action_id for x in gin_action_events]
elif knock_action_events:
actions = [x.action_id for x in knock_action_events]
elif discard_action_events:
best_discards = GinRummyNoviceRuleAgent._get_best_discards(discard_action_events=discard_action_events,
state=state)
if best_discards:
actions = [DiscardAction(card=card).action_id for card in best_discards]
return np.random.choice(actions)
def eval_step(self, state):
''' Predict the action given the current state for evaluation.
        Since the agent is not trained, this function is equivalent to the step function.
Args:
            state (numpy.array): a numpy array that represents the current state
Returns:
action (int): the action predicted by the agent
probabilities (list): The list of action probabilities
'''
probabilities = []
return self.step(state), probabilities
@staticmethod
def _get_best_discards(discard_action_events, state) -> List[Card]:
best_discards = [] # type: List[Card]
final_deadwood_count = 999
env_hand = state['obs'][0]
hand = utils.decode_cards(env_cards=env_hand)
for discard_action_event in discard_action_events:
discard_card = discard_action_event.card
next_hand = [card for card in hand if card != discard_card]
meld_clusters = melding.get_meld_clusters(hand=next_hand)
deadwood_counts = []
for meld_cluster in meld_clusters:
deadwood_count = utils.get_deadwood_count(hand=next_hand, meld_cluster=meld_cluster)
deadwood_counts.append(deadwood_count)
best_deadwood_count = min(deadwood_counts,
default=utils.get_deadwood_count(hand=next_hand, meld_cluster=[]))
if best_deadwood_count < final_deadwood_count:
final_deadwood_count = best_deadwood_count
best_discards = [discard_card]
elif best_deadwood_count == final_deadwood_count:
best_discards.append(discard_card)
return best_discards
class GinRummyNoviceRuleModel(Model):
''' Gin Rummy Rule Model
'''
def __init__(self):
''' Load pre-trained model
'''
super().__init__()
env = rlcard.make('gin-rummy')
rule_agent = GinRummyNoviceRuleAgent()
self.rule_agents = [rule_agent for _ in range(env.player_num)]
@property
def agents(self):
        ''' Get a list of agents for each position in the game
Returns:
agents (list): A list of agents
Note: Each agent should be just like RL agent with step and eval_step
functioning well.
'''
return self.rule_agents
```
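
The novice rule agent above is stateless, so it can be wired straight into an environment loop. The sketch below is not part of the repository; it assumes the same `rlcard.make` / `set_agents` / `run` API used by the Leduc Hold'em example earlier in this collection, and that the rule agents are accepted by `env.run` just like any other agent.
```python
import rlcard
from rlcard.models.gin_rummy_rule_models import GinRummyNoviceRuleModel

# Play one Gin Rummy hand with the novice rule agents in every seat.
model = GinRummyNoviceRuleModel()
env = rlcard.make('gin-rummy')
env.set_agents(model.agents)
trajectories, payoffs = env.run(is_training=False)
print(payoffs)
```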
#### File: tests/games/test_leducholdem_game.py
```python
import unittest
import numpy as np
from rlcard.games.leducholdem.game import LeducholdemGame as Game
from rlcard.games.leducholdem.player import LeducholdemPlayer as Player
from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger
from rlcard.core import Card
class TestLeducholdemMethods(unittest.TestCase):
def test_get_action_num(self):
game = Game()
action_num = game.get_action_num()
self.assertEqual(action_num, 4)
def test_init_game(self):
game = Game()
state, player_id = game.init_game()
test_id = game.get_player_id()
self.assertEqual(test_id, player_id)
self.assertIn('raise', state['legal_actions'])
self.assertIn('fold', state['legal_actions'])
self.assertIn('call', state['legal_actions'])
def test_step(self):
game = Game()
# test raise
game.init_game()
init_raised = game.round.have_raised
game.step('raise')
step_raised = game.round.have_raised
self.assertEqual(init_raised + 1, step_raised)
# test fold
game.init_game()
game.step('fold')
self.assertTrue(game.round.player_folded)
# test call
game.init_game()
game.step('raise')
game.step('call')
self.assertEqual(game.round_counter, 1)
# test check
game.init_game()
game.step('call')
game.step('check')
self.assertEqual(game.round_counter, 1)
def test_step_back(self):
game = Game(allow_step_back=True)
state, player_id = game.init_game()
action = state['legal_actions'][0]
game.step(action)
game.step_back()
self.assertEqual(game.game_pointer, player_id)
self.assertEqual(game.step_back(), False)
def test_judge_game(self):
np_random = np.random.RandomState()
players = [Player(0, np_random), Player(1, np_random)]
players[0].in_chips = 10
players[1].in_chips = 10
# Test hand is equal
players[0].hand = Card('S', 'J')
players[1].hand = Card('H', 'J')
public_card = Card('S', 'Q')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], 0)
self.assertEqual(payoffs[1], 0)
# Test one player get a pair
players[0].hand = Card('S', 'J')
players[1].hand = Card('S', 'Q')
public_card = Card('H', 'J')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], 10.0)
self.assertEqual(payoffs[1], -10.0)
# Other cases
# Test one player get a pair
players[0].hand = Card('S', 'J')
players[1].hand = Card('S', 'Q')
public_card = Card('H', 'K')
payoffs = Judger.judge_game(players, public_card)
self.assertEqual(payoffs[0], -10.0)
self.assertEqual(payoffs[1], 10.0)
def test_player_get_player_id(self):
player = Player(0, np.random.RandomState())
self.assertEqual(0, player.get_player_id())
def test_is_over(self):
game = Game()
game.init_game()
game.step('call')
game.step('check')
game.step('check')
game.step('check')
self.assertEqual(game.is_over(), True)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/models/test_models.py
```python
import unittest
from rlcard.models.model import Model
from rlcard.models.pretrained_models import LeducHoldemNFSPModel, LeducHoldemNFSPPytorchModel, LeducHoldemCFRModel
from rlcard.models.leducholdem_rule_models import LeducHoldemRuleModelV1, LeducHoldemRuleModelV2
from rlcard.models.limitholdem_rule_models import LimitholdemRuleModelV1
from rlcard.models.doudizhu_rule_models import DouDizhuRuleModelV1
from rlcard.models.gin_rummy_rule_models import GinRummyNoviceRuleModel
class TestModel(unittest.TestCase):
def test_model(self):
model = Model()
self.assertIsInstance(model, Model)
def test_leduc_holdem_nfsp_model(self):
model = LeducHoldemNFSPModel()
self.assertIsInstance(model, LeducHoldemNFSPModel)
self.assertIsInstance(model.agents, list)
def test_leduc_holdem_nfsp_pytorch_model(self):
model = LeducHoldemNFSPPytorchModel()
self.assertIsInstance(model, LeducHoldemNFSPPytorchModel)
self.assertIsInstance(model.agents, list)
def test_leduc_holdem_cfr_model(self):
model = LeducHoldemCFRModel()
self.assertIsInstance(model, LeducHoldemCFRModel)
self.assertIsInstance(model.agents, list)
def test_leduc_holdem_rule_model_v1(self):
model = LeducHoldemRuleModelV1()
self.assertIsInstance(model, LeducHoldemRuleModelV1)
agent = model.agents[0]
action = agent.step({'raw_legal_actions':['raise']})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['call']})
self.assertEqual(action, 'call')
action = agent.step({'raw_legal_actions':['check']})
self.assertEqual(action, 'check')
action = agent.step({'raw_legal_actions':[]})
self.assertEqual(action, 'fold')
def test_leduc_holdem_rule_model_v2(self):
model = LeducHoldemRuleModelV2()
self.assertIsInstance(model, LeducHoldemRuleModelV2)
agent = model.agents[0]
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['K'], 'public_card':[]}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['Q'], 'public_card':[]}})
self.assertEqual(action, 'check')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['J'], 'public_card':[]}})
self.assertEqual(action, 'fold')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['K', 'J'], 'public_card':['K','J']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['K', 'Q'], 'public_card':['K','J']}})
self.assertEqual(action, 'fold')
action = agent.step({'raw_legal_actions':['fold', 'check', 'call'], 'raw_obs':{'hand':['K'], 'public_card':[]}})
self.assertEqual(action, 'call')
action = agent.step({'raw_legal_actions':['fold', 'call'], 'raw_obs':{'hand':['Q'], 'public_card':[]}})
self.assertEqual(action, 'fold')
def test_limit_holdem_rule_model_v1(self):
model = LimitholdemRuleModelV1()
self.assertIsInstance(model, LimitholdemRuleModelV1)
agent = model.agents[0]
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['S2', 'H4'], 'public_cards':[]}})
self.assertEqual(action, 'fold')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HA'], 'public_cards':[]}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HT'], 'public_cards':[]}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['S2', 'SA'], 'public_cards':[]}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['HQ', 'SJ'], 'public_cards':[]}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['HQ', 'S2'], 'public_cards':[]}})
self.assertEqual(action, 'fold')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HA'], 'public_cards':['CA', 'C2', 'B4']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HQ'], 'public_cards':['CJ', 'C2', 'B4']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['HA', 'H9'], 'public_cards':['HJ', 'C2', 'B4']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SK', 'HQ'], 'public_cards':['H6', 'C2', 'B4']}})
self.assertEqual(action, 'call')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SK', 'HQ'], 'public_cards':['H2', 'C2', 'B4']}})
self.assertEqual(action, 'check')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HA'], 'public_cards':['CA', 'C2', 'B4', 'B6']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SA', 'HQ'], 'public_cards':['CJ', 'C2', 'B4', 'B6']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['H9', 'HA'], 'public_cards':['HJ', 'C2', 'B4', 'B6']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['H9', 'HA'], 'public_cards':['HJ', 'C2', 'B4', 'B6']}})
self.assertEqual(action, 'raise')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SK', 'HQ'], 'public_cards':['H6', 'C2', 'B4', 'B5']}})
self.assertEqual(action, 'call')
action = agent.step({'raw_legal_actions':['raise', 'fold', 'check', 'call'], 'raw_obs':{'hand':['SK', 'HQ'], 'public_cards':['H2', 'C2', 'B4', 'B5']}})
self.assertEqual(action, 'fold')
def test_doudizhu_rule_model_v1(self):
model = DouDizhuRuleModelV1()
self.assertIsInstance(model, DouDizhuRuleModelV1)
agent = model.agents[0]
action = agent.step({'raw_obs': {'current_hand': '345567999TTTTQKAA2BR', 'trace':[]}})
self.assertEqual(action, '34567')
action = agent.step({'raw_obs': {'current_hand': '5999TTTTQKAA2BR', 'trace':[(0, '34567'), (1, 'pass'), (2, 'pass')]}})
self.assertEqual(action, '5')
action = agent.step({'raw_obs': {'actions': ['pass', '44455', '44466', 'TTTT', 'BR'], 'trace':[(0, '33344')]}})
self.assertEqual(action, '44455')
action = agent.step({'raw_obs': {'actions': ['pass', '44455', '44466', 'TTTT', 'BR'], 'trace':[(0, '33344'), [1, 'pass']]}})
self.assertEqual(action, '44455')
action = agent.step({'raw_obs': {'actions': ['pass', 'TTTT', 'BR'], 'self': 2, 'landlord': 0, 'trace':[(0, '33344'), (1, '55566')]}})
self.assertEqual(action, 'pass')
def test_gin_rummy_novice_model(self):
model = GinRummyNoviceRuleModel()
self.assertIsInstance(model, GinRummyNoviceRuleModel)
self.assertIsInstance(model.agents, list)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/utils/test_holdem_utils.py
```python
import itertools
import unittest
from rlcard.games.limitholdem.judger import LimitholdemJudger
from rlcard.games.limitholdem.utils import compare_hands
from rlcard.games.limitholdem.utils import Hand as Hand
import numpy as np
''' Combinations selected for testing compare_hands function
Royal straight flush ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']
Straight flush ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7']
Four of a kind ['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'] ['CT', 'ST', 'HT, 'BT', 'CK', 'C8', 'C7']
Fullhouse ['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'] ['CT', 'ST', 'HT', 'B9', 'C9', 'C8', 'C7'] ['CJ', 'SJ', 'HJ', 'B8', 'C8', 'C5', 'C7']
Flush ['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'] ['CA', 'CQ', 'CT', 'C8', 'C7', 'C4', 'C2']
Straight ['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'] ['CK', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']
Three of a kind ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'] ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C8', 'C4'] ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'] ['ST', 'ST', 'HT', 'B9', 'C2', 'C8', 'C7']
Two_pairs ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'] ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C5', 'C7'] ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']
One_pair ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'] ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C6'] ['CT', 'ST', 'H9', 'B3', 'C2', 'C8', 'C7']
Highcards ['CJ', 'S5', 'H9', 'B4', 'C2', 'C8', 'C7'] ['CJ', 'S5', 'H9', 'B4', 'C3', 'C8', 'C7']
'''
class TestHoldemUtils(unittest.TestCase):
def test_evaluate_hand_exception(self):
hand = Hand(['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8'])
with self.assertRaises(Exception):
hand.evaluateHand()
def test_has_high_card_false(self):
hand = Hand(['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'S3'])
hand.product = 20
self.assertEqual(hand._has_high_card(), False)
def test_compare_hands(self):
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CQ', 'SQ', 'H9', 'B3', 'C2', 'C8', 'C6']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7'], None])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [None, ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [0, 1])
#straight flush
hands1 = [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'],
['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7'],
['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7'],
['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7'],
['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7']]
winner = compare_hands(hands1)
self.assertEqual(winner, [0, 1, 1, 1, 1])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'],
['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [0, 1, 0])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'],
['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7'], None])
self.assertEqual(winner, [0, 1, 0, 0])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [1, 1])
        # Compare between different categories
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'], ['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7'], ['HJ', 'S5', 'C9', 'B3', 'H2', 'H8', 'H7']])
self.assertEqual(winner, [1, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
hands2 = [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'],
['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'],
['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA'],
['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']]
winner = compare_hands(hands2)
self.assertEqual(winner, [0, 1, 1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CJ', 'CT', 'CQ', 'CK', 'C9', 'C8', 'CA']])
self.assertEqual(winner, [0, 1])
#Four of a kind
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'BJ', 'C8', 'C3', 'C4']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C3', 'C4']])
self.assertEqual(winner, [1, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
#Fullhouse
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CT', 'ST', 'HT', 'B9', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C6']])
self.assertEqual(winner, [1, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B5', 'C5', 'C8', 'C6']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'BT', 'CT', 'C8', 'C6']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B8', 'C8', 'C7', 'C6']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
#Flush
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
#Straight
winner = compare_hands( [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
#Two pairs
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CT', 'ST', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CT', 'ST', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'],
['CT', 'ST', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [0, 1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7'],
['CJ', 'SJ', 'HT', 'BT', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [0, 1, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C5', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C5']])
self.assertEqual(winner, [1, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CJ', 'S5', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'BJ', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'BJ', 'CT', 'B8', 'C7']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CT', 'ST', 'HT', 'B9', 'C9', 'C8', 'C7'], ['CJ', 'SJ', 'HJ', 'B9', 'C9', 'C8', 'C7']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'S5', 'H9', 'B4', 'C2', 'C8', 'C7'], ['CJ', 'S6', 'H9', 'B4', 'C3', 'C8', 'C7']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CA', 'CQ', 'CT', 'C8', 'C6', 'C4', 'C2'], ['CA', 'CQ', 'CT', 'C8', 'C7', 'C4', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7'], ['CK', 'ST', 'HQ', 'BK', 'B9', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C7'], ['ST', 'ST', 'HT', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
#Three of a kind
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CQ', 'SQ', 'HQ', 'B9', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'BT', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'B7', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C6', 'C4']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C6', 'C4']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C7', 'C4'], ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H6', 'B6', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B9', 'C2', 'C5', 'C7'], ['CJ', 'SJ', 'H9', 'B9', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HJ', 'B9', 'C2', 'C8', 'C4'], ['CJ', 'SJ', 'HJ', 'B9', 'C3', 'C8', 'C4']])
self.assertEqual(winner, [1, 1])
#One pair
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CT', 'ST', 'H9', 'B3', 'C2', 'C8', 'C7']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C6']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'B3', 'C2', 'C8', 'C7'], ['CQ', 'SQ', 'H9', 'B3', 'C2', 'C8', 'C6']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'HT', 'C8', 'C7', 'B3', 'C2'], ['CJ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CJ', 'SJ', 'HT', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CJ', 'SJ', 'HT', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CJ', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2'], ['CJ', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2'], ['CJ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CJ', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2'], ['CJ', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 1])
#high_cards
winner = compare_hands( [['CK', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2'], ['CQ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CQ', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CK', 'SQ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C8', 'C6', 'B3', 'C2'], ['CK', 'SQ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CK', 'SJ', 'H8', 'C7', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CK', 'SJ', 'H8', 'C7', 'C6', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C8', 'C7', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C7', 'C5', 'B3', 'C2']])
self.assertEqual(winner, [1, 0])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C7', 'C5', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [0, 1])
winner = compare_hands( [['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2'], ['CK', 'SJ', 'H9', 'C7', 'C6', 'B3', 'C2']])
self.assertEqual(winner, [1, 1])
winner = compare_hands([['C5', 'S9', 'S6', 'C2', 'CT', 'C7', 'H5'], ['S7', 'SJ', 'S6', 'C2', 'CT', 'C7', 'H5'], None, None, ['H7', 'DJ', 'S6', 'C2', 'CT', 'C7', 'H5'], None])
self.assertEqual(winner, [0, 1, 0, 0, 1, 0])
winner = compare_hands([['H3', 'D5', 'S6', 'H9', 'CA', 'HA', 'SA'], # three of a kind
['H2', 'H3', 'C4', 'D5', 'C6', 'S6', 'ST']]) # straight
self.assertEqual(winner, [0, 1])
winner = compare_hands([['H3', 'D5', 'S6', 'H9', 'CA', 'HA', 'SA'], # three of a kind
['H2', 'H3', 'C4', 'D5', 'CQ', 'SK', 'SA']]) # straight beginning with A
self.assertEqual(winner, [0, 1])
winner = compare_hands([['H5', 'HQ', 'C2', 'D3', 'S4', 'S5', 'HT'], # pair
['D5', 'ST', 'C2', 'D3', 'S4', 'S5', 'HA'], # A,2,3,4,5
['H6', 'HQ', 'C2', 'D3', 'S4', 'S5', 'HT'], # 2,3,4,5,6
['H4', 'HQ', 'C2', 'D3', 'S4', 'S5', 'HT'], # pair
])
self.assertEqual(winner, [0, 0, 1, 0])
winner = compare_hands([['D5', 'ST', 'C2', 'D3', 'S4', 'S5', 'HA'], # A,2,3,4,5
['H6', 'H7', 'CK', 'DQ', 'SJ', 'ST', 'HA'], # T,J,Q,K,A
['HA', 'HT', 'CK', 'DQ', 'SJ', 'ST', 'DT'], # T,J,Q,K,A
['H2', 'H3', 'C4', 'D2', 'CQ', 'S2', 'SA'], # three of a kind
])
self.assertEqual(winner, [0, 1, 1, 0])
winner = compare_hands([['H5', 'HQ', 'C2', 'D3', 'S4', 'S5', 'HT'], # pair
['D5', 'ST', 'S2', 'S3', 'S4', 'S5', 'SA'], # A,2,3,4,5 suited
['C6', 'HQ', 'C2', 'C3', 'C4', 'C5', 'HT'], # 2,3,4,5,6 suited
['H7', 'H6', 'C9', 'C3', 'C4', 'C5', 'HT'], # 3,4,5,6,7 not suited
])
self.assertEqual(winner, [0, 0, 1, 0])
winner = compare_hands([['S2', 'D8', 'H8', 'S7', 'S8', 'C8', 'D3'], # 8-four of a kind, kicker 7
['S2', 'D8', 'H8', 'S9', 'S8', 'C8', 'D3'], # 8-four of a kind, kicker 9
['H3', 'C3', 'HT', 'S3', 'ST', 'CT', 'D3'], # 3-four of a kind, kicker T
])
self.assertEqual(winner, [0, 1, 0])
winner = compare_hands([['CA', 'C2', 'DJ', 'CT', 'S7', 'C5', 'ST'], # pair T, T, kicker A, J
['S3', 'S4', 'DJ', 'CT', 'S7', 'C5', 'ST'], # pair T, T, kicker J, 4
['HQ', 'DA', 'DJ', 'CT', 'S7', 'C5', 'ST'], # pair T, T, kicker A, Q
['SQ', 'HA', 'DJ', 'CT', 'S7', 'C5', 'ST'] # pair T, T, kicker A, Q
])
self.assertEqual(winner, [0, 0, 1, 1])
def test_split_pots_among_players(self):
j = LimitholdemJudger(np.random.RandomState(seed=7))
# simple cases where all players bet same amount of chips
self.assertEqual(j.split_pots_among_players([2, 2], [0, 1]), [0, 4])
self.assertEqual(j.split_pots_among_players([2, 2], [1, 0]), [4, 0])
self.assertEqual(j.split_pots_among_players([2, 2], [1, 1]), [2, 2])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [1, 0, 0]), [6, 0, 0])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [0, 1, 0]), [0, 6, 0])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [0, 0, 1]), [0, 0, 6])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [1, 0, 1]), [3, 0, 3])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [0, 1, 1]), [0, 3, 3])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [1, 1, 0]), [3, 3, 0])
self.assertEqual(j.split_pots_among_players([2, 2, 2], [1, 1, 1]), [2, 2, 2])
self.assertEqual(j.split_pots_among_players([3, 3, 3], [0, 1, 1]), [0, 4, 5])
        # for the case above 9 is not divisible by 2 so a random winner gets the remainder
# trickier cases with different amounts bet (some players are all in)
self.assertEqual(j.split_pots_among_players([3, 2], [0, 1]), [1, 4])
self.assertEqual(j.split_pots_among_players([3, 2], [1, 0]), [5, 0])
self.assertEqual(j.split_pots_among_players([3, 2], [1, 1]), [3, 2])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [1, 0, 0]), [6, 2, 2])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [0, 1, 0]), [0, 10, 0])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [0, 0, 1]), [0, 0, 10])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [1, 1, 0]), [3, 7, 0])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [1, 0, 1]), [3, 0, 7])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [0, 1, 1]), [0, 5, 5])
self.assertEqual(j.split_pots_among_players([2, 4, 4], [1, 1, 1]), [2, 4, 4])
self.assertEqual(j.split_pots_among_players([1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1]), [0, 2, 0, 4, 0, 6])
def test_split_pots_among_players_cases_generated(self):
def check_result(in_chips, winners, allocated):
"""check that winners have won chips (more chips in allocated than in in_chips)
            and that losers have lost chips (strictly fewer chips in allocated than in in_chips)"""
assert sum(allocated) == sum(in_chips)
for i in range(len(in_chips)):
if winners[i]:
self.assertGreaterEqual(allocated[i], in_chips[i])
# can be equal for example with 2 winners and 1 loser who has bet one chip (not divisible by 2)
# so the winner who does not get the chip of the loser will have allocated[i] == in_chips[i]
elif in_chips[i] > 0:
self.assertLess(allocated[i], in_chips[i])
# because there is at least one winner so a loser who bet must lose at least one chip
randstate = np.random.RandomState(seed=7)
j = LimitholdemJudger(randstate)
# test many random cases from 2 to 6 players with all winners combinations
nb_cases = 0
for nb_players in range(2, 7):
for _ in range(300):
in_chips = [randstate.randint(0, 10) for _ in range(nb_players)]
for winners in itertools.product([0, 1], repeat=nb_players):
if sum(winners) == 0:
continue # impossible case with no winner
if sum(w * v for w, v in zip(winners, in_chips)) == 0:
continue # impossible case where all winners have not bet
allocated = j.split_pots_among_players(in_chips, winners)
nb_cases += 1
check_result(in_chips, winners, allocated)
self.assertEqual(nb_cases, 34954) # to check that correct number of cases have been tested
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "19690ao/Instantutor",
"score": 3
} |
#### File: DataStructures/Python/RedBlackTree.py
```python
from os import X_OK
from collections import deque
import sys
class Red_Black_Node:
def __init__(self):
self.color = 0
self.val = sys.maxsize
self.parent = None
self.left = None
self.right = None
class Red_Black_Tree:
def __init__(self):
self.root = None
def insert (self, inserted_node: Red_Black_Node) -> None:
curr,parent = self.root , None
while curr:
parent = curr
if inserted_node.val < curr.val: curr = curr.left
else: curr = curr.right
inserted_node.parent = parent
if parent == None: self.root = inserted_node
elif inserted_node.val < parent.val: parent.left = inserted_node
else: parent.right = inserted_node
inserted_node.left, inserted_node.right, inserted_node.color = None, None, 1
        if not inserted_node.parent:
            inserted_node.color = 0  # the tree root is always black
            return
if not inserted_node.parent.parent: return
self.balance(inserted_node)
def rotate_left(self,target_node: Red_Black_Node) -> None:
right = target_node.right
target_node.right= right.left
if right.left: right.left.parent = target_node
right.parent = target_node.parent
        if not target_node.parent: self.root = right
        elif target_node == target_node.parent.left: target_node.parent.left = right
        else: target_node.parent.right = right
right.left = target_node
target_node.parent = right
def rotate_right(self,x: Red_Black_Node) -> None:
y = x.left
x.left = y.right
        if y.right: y.right.parent = x
y.parent = x.parent
if not x.parent: self.root = y
elif x.parent and x == x.parent.right: x.parent.right = y
else: x.parent.left = y
y.right = x
x.parent = y
    def balance(self, target_node: Red_Black_Node) -> None:
        # Standard red-black insert fix-up: walk up the tree repairing red-red violations.
        while target_node.parent and target_node.parent.color == 1:
            if target_node.parent == target_node.parent.parent.left:
                ptr = target_node.parent.parent.right  # uncle
                if ptr and ptr.color == 1:
                    # Case 1: red uncle -> recolor and continue from the grandparent
                    target_node.parent.color = 0
                    ptr.color = 0
                    target_node.parent.parent.color = 1
                    target_node = target_node.parent.parent
                else:
                    if target_node == target_node.parent.right:
                        # Case 2: "triangle" -> rotate the parent down to the line case
                        target_node = target_node.parent
                        self.rotate_left(target_node)
                    # Case 3: "line" -> recolor and rotate the grandparent
                    target_node.parent.color = 0
                    target_node.parent.parent.color = 1
                    self.rotate_right(target_node.parent.parent)
            else:
                ptr = target_node.parent.parent.left  # uncle
                if ptr and ptr.color == 1:
                    target_node.parent.color = 0
                    ptr.color = 0
                    target_node.parent.parent.color = 1
                    target_node = target_node.parent.parent
                else:
                    if target_node == target_node.parent.left:
                        target_node = target_node.parent
                        self.rotate_right(target_node)
                    target_node.parent.color = 0
                    target_node.parent.parent.color = 1
                    self.rotate_left(target_node.parent.parent)
            if target_node == self.root:
                break
        self.root.color = 0
def printall(self):
deq,vals = deque([[self.root]]),[[self.root.val,self.root.color]]
while deq:
temp = deq.popleft()
holder,holder2 = [],[]
for i in temp:
if i.left:
holder.append(i.left)
holder2.append((i.left.val,i.left.color))
if i.right:
holder.append(i.right)
holder2.append((i.right.val,i.right.color))
if holder:
deq.append(holder)
vals.append(holder2)
print(vals)
obj = Red_Black_Tree()
for i in range(8):
node = Red_Black_Node()
node.val = i
obj.insert(node)
obj.printall()
``` |
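
Red-black rebalancing is easy to get subtly wrong, so a small invariant checker is a useful companion to the class above. The sketch below is not part of the original file; it assumes the `Red_Black_Node` / `Red_Black_Tree` classes as defined and verifies the two core invariants: no red node has a red child, and every root-to-leaf path carries the same number of black nodes.
```python
def check_rb_invariants(node):
    """Return the black-height of `node`, asserting the red-black invariants."""
    if node is None:
        return 1  # NIL leaves count as black
    if node.color == 1:  # a red node may not have a red child
        assert not (node.left and node.left.color == 1)
        assert not (node.right and node.right.color == 1)
    left_bh = check_rb_invariants(node.left)
    right_bh = check_rb_invariants(node.right)
    assert left_bh == right_bh, "black-heights differ"
    return left_bh + (1 if node.color == 0 else 0)

tree = Red_Black_Tree()
for val in range(20):
    n = Red_Black_Node()
    n.val = val
    tree.insert(n)
assert tree.root.color == 0  # the root must be black
check_rb_invariants(tree.root)
```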
{
"source": "196sigma/ner-10ks",
"score": 3
} |
#### File: ner-10ks/ner/2Get10kLinks.py
```python
import os,sys,csv,urllib2,time
from bs4 import BeautifulSoup #<---- Need to install this package manually using pip
os.chdir('/Users/alvinzuyinzheng/Dropbox/PythonWorkshop/scripts/')#<===The location of your file "LongCompanyList.csv
IndexLinksFile = "IndexLinks.csv" #a csv file (output of the 1GetIndexLinks.py script) with the list of index links for each firm (the file has a line with headers)
Form10kListFile = "10kList.csv" #a csv file (output of the current script) with the list of 10-K links for each firm (the file has a line with headers)
def get10kLink(tickerCode, docIndex, docLink, description, filingDate, newFilingDate, FormType):
csvOutput = open(Form10kListFile,"a+b")
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
pageRequest = urllib2.Request(docLink)
pageOpen = urllib2.urlopen(pageRequest)
pageRead = pageOpen.read()
soup = BeautifulSoup(pageRead,"html.parser")
#Check if there is a table to extract / code exists in edgar database
    table = soup.find("table", { "summary" : "Document Format Files" })
    if table is None:
        print "No tables found for link "+docLink
        csvOutput.close()
        return
for row in table.findAll("tr"):
cells = row.findAll("td")
if len(cells)==5:
if cells[3].text.strip() == FormType:
link = cells[2].find("a")
formLink = "https://www.sec.gov"+link['href']
formName = link.text.encode('utf8').strip()
csvWriter.writerow([tickerCode, docIndex, docLink, description, filingDate, newFilingDate, formLink,formName])
csvOutput.close()
def main():
FormType = "10-K" ### <=== Type your document type here
nbDocPause = 10 ### <=== Type your number of documents to download in one batch
nbSecPause = 1 ### <=== Type your pausing time in seconds between each batch
csvFile = open(IndexLinksFile,"r") #<===Open and read from a csv file with the list of index links for each firm (the file has a line with headers)
csvReader = csv.reader(csvFile,delimiter=",")
csvData = list(csvReader)
csvOutput = open(Form10kListFile,"a+b") #<===open and write to a csv file which will include the list of 10-K links. New rows will be appended.
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
csvWriter.writerow(["Ticker", "DocIndex", "IndexLink", "Description", "FilingDate", "NewFilingDate", "Form10KLink","Form10KName"])
csvOutput.close()
i = 1
for rowData in csvData[1:]:
Ticker = rowData[0]
DocIndex = rowData[1]
DocLink = rowData[2]
Description = rowData[3]
FileDate = rowData[4]
NewFileDate = rowData[5]
get10kLink(Ticker,DocIndex,DocLink,Description,FileDate,NewFileDate,FormType)
if i%nbDocPause == 0:
print i
print "Pause for "+str(nbSecPause)+" second .... "
time.sleep(float(nbSecPause))
i=i+1
csvFile.close()
print "done!"
if __name__ == "__main__":
main()
```
#### File: ner-10ks/ner/3DownLoadHTML.py
```python
import os,sys,csv,urllib2,time
os.chdir('/Users/alvinzuyinzheng/Dropbox/PythonWorkshop/scripts/')#<===The location of your file "LongCompanyList.csv
htmlSubPath = "./HTML/" #<===The subfolder with the 10-K files in HTML format
Form10kListFile = "10kList.csv" #a csv file (output of the 2Get10kLinks.py script) with the list of 10-K links
logFile = "10kDownloadLog.csv" #a csv file (output of the current script) with the download history of 10-K forms
def dowmload10k(tickerCode, docIndex, docLink, description, filingDate, newFilingDate, formLink,formName):
csvOutput = open(logFile,"a+b")
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
try:
pageRequest = urllib2.Request(formLink)
pageOpen = urllib2.urlopen(pageRequest)
pageRead = pageOpen.read()
htmlname = tickerCode+"_"+docIndex+"_"+newFilingDate+".htm"
htmlpath = htmlSubPath+htmlname
htmlfile = open(htmlpath,'wb')
htmlfile.write(pageRead)
htmlfile.close()
csvWriter.writerow([tickerCode, docIndex, docLink, description, filingDate, newFilingDate, formLink,formName, htmlname, ""])
except:
csvWriter.writerow([tickerCode, docIndex, docLink, description, filingDate, newFilingDate, formLink,formName, "","not downloaded"])
csvOutput.close()
def main():
if not os.path.isdir(htmlSubPath): ### <=== keep all HTML files in this subfolder
os.makedirs(htmlSubPath)
FormType = "10-K" ### <=== Type your document type here
nbDocPause = 5 ### <=== Type your number of documents to download in one batch
nbSecPause = 1 ### <=== Type your pausing time in seconds between each batch
FormYears = ['2014','2015'] ### <=== Type the years of documents to download here
csvFile = open(Form10kListFile,"r") #<===A csv file with the list of company ticker symbols (the file has a line with headers)
csvReader = csv.reader(csvFile,delimiter=",")
csvData = list(csvReader)
csvOutput = open(logFile,"a+b")
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
csvWriter.writerow(["Ticker", "DocIndex", "IndexLink", "Description", "FilingDate", "NewFilingDate", "Form10KLink","Form10KName", "FileName","Note"])
csvOutput.close()
i = 1
for rowData in csvData[1:]:
Ticker = rowData[0]
DocIndex = rowData[1]
IndexLink = rowData[2]
Description = rowData[3]
FilingDate = rowData[4]
NewFilingDate = rowData[5]
FormLink = rowData[6]
FormName = rowData[7]
for year in FormYears:
if year in FilingDate:
if ".htm" in FormName:
dowmload10k(Ticker, DocIndex, IndexLink, Description, FilingDate, NewFilingDate, FormLink,FormName)
elif ".txt" in FormName:
csvOutput = open(logFile,"a+b")
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
csvWriter.writerow([Ticker, DocIndex, IndexLink, Description, FilingDate, NewFilingDate, FormLink,FormName, "","Text format"])
csvOutput.close()
else:
csvOutput = open(logFile,"a+b")
csvWriter = csv.writer(csvOutput, quoting = csv.QUOTE_NONNUMERIC)
csvWriter.writerow([Ticker, DocIndex, IndexLink, Description, FilingDate, NewFilingDate, FormLink,FormName,"", "No form"])
csvOutput.close()
if i%nbDocPause == 0:
print i
print "Pause for "+str(nbSecPause)+" second .... "
time.sleep(float(nbSecPause))
i=i+1
csvFile.close()
print "done!"
if __name__ == "__main__":
main()
``` |
{
"source": "196sigma/ragde",
"score": 3
} |
#### File: ragde/ragde/readability.py
```python
import warnings
import string
import re
import math
import pkg_resources
from collections import Counter
from functools import lru_cache
from pyphen import Pyphen
langs = {
"en": { # Default config
"fre_base": 206.835,
"fre_sentence_length": 1.015,
"fre_syll_per_word": 84.6,
"syllable_threshold": 3,
}
}
__lang = "en_US"
text_encoding = "utf-8"
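
# NOTE: the Flesch functions further down call __get_lang_root() and
# __get_lang_cfg(), which are referenced but never defined in this file.
# The two helpers below are an editor's minimal reconstruction based on the
# `langs` dict and `__lang` above (textstat-style); treat them as an
# assumption rather than the original implementation.
def __get_lang_root():
    # "en_US" -> "en"
    return __lang.split("_")[0]

def __get_lang_cfg(key):
    # Look up a per-language constant, falling back to the English defaults.
    default = langs["en"]
    return langs.get(__get_lang_root(), default).get(key, default[key])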
def legacy_round(number, points=0):
p = 10 ** points
return float(math.floor((number * p) + math.copysign(0.5, number))) / p
def get_grade_suffix(grade):
"""
Select correct ordinal suffix
"""
ordinal_map = {1: 'st', 2: 'nd', 3: 'rd'}
teens_map = {11: 'th', 12: 'th', 13: 'th'}
return teens_map.get(grade % 100, ordinal_map.get(grade % 10, 'th'))
def _cache_clear(self):
    caching_methods = [
        method for method in dir(self)
        if callable(getattr(self, method))
        and hasattr(getattr(self, method), "cache_info")
    ]
    for method in caching_methods:
        getattr(self, method).cache_clear()
@lru_cache(maxsize=128)
def char_count( text, ignore_spaces=True):
"""
Function to return total character counts in a text,
pass the following parameter `ignore_spaces = False`
to ignore whitespaces
"""
if ignore_spaces:
text = text.replace(" ", "")
return len(text)
@lru_cache(maxsize=128)
def letter_count( text, ignore_spaces=True):
"""
Function to return total letter amount in a text,
pass the following parameter `ignore_spaces = False`
to ignore whitespaces
"""
if ignore_spaces:
text = text.replace(" ", "")
return len(remove_punctuation(text))
def remove_punctuation(text):
return ''.join(ch for ch in text if ch not in string.punctuation)
@lru_cache(maxsize=128)
def lexicon_count( text, removepunct=True):
"""
Function to return total lexicon (words in lay terms) counts in a text
"""
if removepunct:
text = remove_punctuation(text)
count = len(text.split())
return count
@lru_cache(maxsize=128)
def syllable_count( text, lang=None):
"""
Function to calculate syllable words in a text.
I/P - a text
O/P - number of syllable words
"""
if isinstance(text, bytes):
text = text.decode(text_encoding)
text = text.lower()
text = remove_punctuation(text)
if not text:
return 0
dic = Pyphen(lang=__lang)
count = 0
for word in text.split(' '):
word_hyphenated = dic.inserted(word)
count += max(1, word_hyphenated.count("-") + 1)
return count
@lru_cache(maxsize=128)
def sentence_count( text):
"""
Sentence count of a text
"""
ignore_count = 0
sentences = re.split(r' *[\.\?!][\'"\)\]]*[ |\n](?=[A-Z])', text)
for sentence in sentences:
if lexicon_count(sentence) <= 2:
ignore_count += 1
return max(1, len(sentences) - ignore_count)
@lru_cache(maxsize=128)
def avg_sentence_length( text):
try:
asl = float(lexicon_count(text) / sentence_count(text))
return legacy_round(asl, 1)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_syllables_per_word( text, interval=None):
syllable = syllable_count(text)
words = lexicon_count(text)
try:
if interval:
syllables_per_word = float(syllable) * interval / float(words)
else:
syllables_per_word = float(syllable) / float(words)
return legacy_round(syllables_per_word, 1)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_character_per_word( text):
try:
letters_per_word = float(
char_count(text) / lexicon_count(text))
return legacy_round(letters_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_letter_per_word( text):
try:
letters_per_word = float(
letter_count(text) / lexicon_count(text))
return legacy_round(letters_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_sentence_per_word( text):
try:
sentence_per_word = float(
sentence_count(text) / lexicon_count(text))
return legacy_round(sentence_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def flesch_reading_ease( text):
sentence_length = avg_sentence_length(text)
s_interval = 100 if __get_lang_root() in ['es', 'it'] else None
syllables_per_word = avg_syllables_per_word(text, s_interval)
flesch = (
__get_lang_cfg("fre_base")
- float(
__get_lang_cfg("fre_sentence_length") * sentence_length
)
- float(
__get_lang_cfg("fre_syll_per_word") * syllables_per_word
)
)
return legacy_round(flesch, 2)
@lru_cache(maxsize=128)
def flesch_kincaid_grade(text):
    sentence_length = avg_sentence_length(text)
    syllables_per_word = avg_syllables_per_word(text)
    flesch = (
        float(0.39 * sentence_length)
        + float(11.8 * syllables_per_word)
        - 15.59)
    return legacy_round(flesch, 1)
@lru_cache(maxsize=128)
def polysyllabcount( text):
count = 0
for word in text.split():
wrds = syllable_count(word)
if wrds >= 3:
count += 1
return count
@lru_cache(maxsize=128)
def smog_index( text):
sentences = sentence_count(text)
if sentences >= 3:
try:
poly_syllab = polysyllabcount(text)
smog = (
(1.043 * (30 * (poly_syllab / sentences)) ** .5)
+ 3.1291)
return legacy_round(smog, 1)
except ZeroDivisionError:
return 0.0
else:
return 0.0
@lru_cache(maxsize=128)
def coleman_liau_index( text):
letters = legacy_round(avg_letter_per_word(text) * 100, 2)
sentences = legacy_round(avg_sentence_per_word(text) * 100, 2)
coleman = float((0.058 * letters) - (0.296 * sentences) - 15.8)
return legacy_round(coleman, 2)
@lru_cache(maxsize=128)
def automated_readability_index( text):
chrs = char_count(text)
words = lexicon_count(text)
sentences = sentence_count(text)
try:
a = float(chrs) / float(words)
b = float(words) / float(sentences)
readability = (
(4.71 * legacy_round(a, 2))
+ (0.5 * legacy_round(b, 2))
- 21.43)
return legacy_round(readability, 1)
except ZeroDivisionError:
return 0.0
#@lru_cache(maxsize=128)
def linsear_write_formula( text):
easy_word = 0
difficult_word = 0
text_list = text.split()[:100]
for word in text_list:
if syllable_count(word) < 3:
easy_word += 1
else:
difficult_word += 1
text = ' '.join(text_list)
number = float(
(easy_word * 1 + difficult_word * 3)
/ sentence_count(text))
if number <= 20:
number -= 2
return number / 2
#@lru_cache(maxsize=128)
def difficult_words( text, syllable_threshold=2):
return len(difficult_words_list(text, syllable_threshold))
#@lru_cache(maxsize=128)
def difficult_words_list( text, syllable_threshold=2):
text_list = re.findall(r"[\w\='‘’]+", text.lower())
diff_words_set = set()
easy_word_set = __get_easy_words()
for value in text_list:
if value not in easy_word_set:
if syllable_count(value) >= syllable_threshold:
diff_words_set.add(value)
return list(diff_words_set)
@lru_cache(maxsize=128)
def gunning_fog( text):
try:
syllable_threshold = __get_lang_cfg("syllable_threshold")
per_diff_words = (
difficult_words(
text,
syllable_threshold=syllable_threshold)
/ lexicon_count(text) * 100)
grade = 0.4 * (avg_sentence_length(text) + per_diff_words)
return legacy_round(grade, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def lix( text):
words = text.split()
words_len = len(words)
long_words = len([wrd for wrd in words if len(wrd) > 6])
per_long_words = (float(long_words) * 100) / words_len
asl = avg_sentence_length(text)
lix = asl + per_long_words
return legacy_round(lix, 2)
@lru_cache(maxsize=128)
def rix( text):
"""
A Rix ratio is simply the number of long words divided by
the number of assessed sentences.
rix = LW/S
"""
words = text.split()
long_words_count = len([wrd for wrd in words if len(wrd) > 6])
sentences_count = sentence_count(text)
try:
rix = long_words_count / sentences_count
except ZeroDivisionError:
rix = 0.00
return legacy_round(rix, 2)
@lru_cache(maxsize=128)
def spache_readability( text, float_output=True):
"""
Function to calculate SPACHE readability formula for young readers.
I/P - a text
O/P - an int Spache Readability Index/Grade Level
"""
total_no_of_words = lexicon_count(text)
count_of_sentences = sentence_count(text)
asl = total_no_of_words / count_of_sentences
pdw = (difficult_words(text) / total_no_of_words) * 100
spache = (0.141 * asl) + (0.086 * pdw) + 0.839
if not float_output:
return int(spache)
else:
return spache
@lru_cache(maxsize=128)
def dale_chall_readability_score_v2( text):
"""
Function to calculate New Dale Chall Readability formula.
I/P - a text
O/P - an int Dale Chall Readability Index/Grade Level
"""
total_no_of_words = lexicon_count(text)
count_of_sentences = sentence_count(text)
asl = total_no_of_words / count_of_sentences
pdw = (difficult_words(text) / total_no_of_words) * 100
raw_score = 0.1579 * (pdw) + 0.0496 * asl
adjusted_score = raw_score
if raw_score > 0.05:
adjusted_score = raw_score + 3.6365
return legacy_round(adjusted_score, 2)
@lru_cache(maxsize=128)
def text_standard( text, float_output=None):
grade = []
# Appending Flesch Kincaid Grade
lower = legacy_round(flesch_kincaid_grade(text))
upper = math.ceil(flesch_kincaid_grade(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Flesch Reading Easy
score = flesch_reading_ease(text)
if score < 100 and score >= 90:
grade.append(5)
elif score < 90 and score >= 80:
grade.append(6)
elif score < 80 and score >= 70:
grade.append(7)
elif score < 70 and score >= 60:
grade.append(8)
grade.append(9)
elif score < 60 and score >= 50:
grade.append(10)
elif score < 50 and score >= 40:
grade.append(11)
elif score < 40 and score >= 30:
grade.append(12)
else:
grade.append(13)
# Appending SMOG Index
lower = legacy_round(smog_index(text))
upper = math.ceil(smog_index(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Coleman_Liau_Index
lower = legacy_round(coleman_liau_index(text))
upper = math.ceil(coleman_liau_index(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Automated_Readability_Index
lower = legacy_round(automated_readability_index(text))
upper = math.ceil(automated_readability_index(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Dale_Chall_Readability_Score
lower = legacy_round(dale_chall_readability_score_v2(text))
upper = math.ceil(dale_chall_readability_score_v2(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Linsear_Write_Formula
lower = legacy_round(linsear_write_formula(text))
upper = math.ceil(linsear_write_formula(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Gunning Fog Index
lower = legacy_round(gunning_fog(text))
upper = math.ceil(gunning_fog(text))
grade.append(int(lower))
grade.append(int(upper))
# Finding the Readability Consensus based upon all the above tests
d = Counter(grade)
final_grade = d.most_common(1)
score = final_grade[0][0]
if float_output:
return float(score)
else:
lower_score = int(score) - 1
upper_score = lower_score + 1
return "{}{} and {}{} grade".format(
lower_score, get_grade_suffix(lower_score),
upper_score, get_grade_suffix(upper_score)
)
@lru_cache(maxsize=128)
def reading_time( text, ms_per_char=14.69):
"""
Function to calculate reading time (Demberg & Keller, 2008)
I/P - a text
O/P - reading time in second
"""
words = text.split()
nchars = map(len, words)
rt_per_word = map(lambda nchar: nchar * ms_per_char, nchars)
reading_time = sum(list(rt_per_word))
return legacy_round(reading_time/1000, 2)
def __get_lang_cfg( key):
default = langs.get("en")
config = langs.get(__get_lang_root(), default)
return config.get(key, default.get(key))
def __get_lang_root():
return __lang.split("_")[0]
def __get_easy_words():
try:
easy_word_set = {ln.decode("utf-8").strip() for ln in pkg_resources.resource_stream("ragde","easy_words.txt")}
#easy_word_set = {ln.decode("utf-8").strip() for ln in open('easy_words.txt', 'rb').readlines()}
except FileNotFoundError:
print("There is no easy words vocabulary.")
return
return easy_word_set
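if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original module):
    # score a short sample passage with a few of the metrics defined above.
    sample_text = (
        "The quick brown fox jumps over the lazy dog. "
        "Readability formulas estimate how hard a text is to read."
    )
    print("Flesch reading ease:", flesch_reading_ease(sample_text))
    print("Flesch-Kincaid grade:", flesch_kincaid_grade(sample_text))
    print("Estimated reading time (s):", reading_time(sample_text))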
``` |
{
"source": "1970633640/Design-and-Analysis-of-Algorithm",
"score": 3
} |
#### File: 1970633640/Design-and-Analysis-of-Algorithm/ex8.py
```python
from random import randint
import time
def queensLV(stepVegas, x, n):
col = set()
diag45 = set()
diag135 = set()
k = 0
while True:
nb = 0
j = 0
for i in range(n):
if (i not in col) and (i - k not in diag45) and (i + k not in diag135):
nb += 1
if randint(1, nb) == 1:
j = i
if nb > 0:
x[k] = j
col.add(j)
diag45.add(j - k)
diag135.add(j + k)
k += 1
if nb == 0 or k == stepVegas:
if nb > 0:
return backtrace(k, x, n)
else:
return False
def test(k, x):
for i in range(0, k):
if x[i] == x[k] or abs(x[k] - x[i]) == abs(k - i):
return False
return True
def test2(x, n):  # thorough check that the whole placement obeys the queen rules
for i in range(0, n - 1):
for j in range(i + 1, n):
if x[i] == x[j] or abs(x[j] - x[i]) == abs(j - i):
print(i, j, x[i], x[j])
return False
return True
def backtrace(start, x, n):
if start >= n:
return True
for i in range(0, n):
x[start] = i
if test(start, x):
if backtrace(start + 1, x, n):
return True
return False
print("stepVegas\tacc(times)\ttime(ms)")
for n in range(12, 21):
#print("n=", n)
for stepVegas in range(1, n + 1):
total = 0
suc = 0
t1 = time.time()
for _ in range(100):
x = [-1 for _ in range(n)]
if queensLV(stepVegas, x, n):
suc += 1
                # if not test2(x, n): # verify the solution
# print("ERROR", x)
total += 1
t2 = time.time()
#print("%d\t%d\t%.2f" % (stepVegas, suc, 1000 * (t2 - t1)))
print("%d\t%d\t%d\t%.2f" % (n,stepVegas, suc, 1000 * (t2 - t1)))
``` |
{
"source": "1970938138/LevelUper",
"score": 3
} |
#### File: 1970938138/LevelUper/Comparisoner.py
```python
import re
Checking_Points1=r'<dt>考点:</dt>([\s\S]*?)</dd>'
Checking_Points2=r'.+?\[(.*?)\].+?'
Checking_Points_biology=r'<dd>([\s\S]*)\\n'
QQmsg=r'http://www.zybang.com/question/rcswebview/'
print("The comparisoner has been launched.")
def match(target,html):
if(target=="Checking_Points"):
result=re.findall(Checking_Points1,html)
if(result):
result=re.findall(Checking_Points2,str(result))
if(result):
return result
else:
result=re.findall(Checking_Points1,html)
result=re.findall(Checking_Points_biology,str(result))
if(result):
result_0=str(result[0]).strip('\\n')
result_0=result_0.strip(' ')
return result_0
else:
                    print("No exam points found. The question may not provide exam-point information. If you are sure it does, please contact the original author and send them the URL of this question.","\n")
return None
else:
            print("No exam points found. The question may not provide exam-point information. If you are sure it does, please contact the original author and send them the URL of this question.","\n")
return None
elif(target=="QQ"):
if(re.match(QQmsg,html)):
return True
else:
return False
``` |
{
"source": "1980744819/playing-mario-with-DQN",
"score": 3
} |
#### File: playing-mario-with-DQN/break_out/model.py
```python
from torch import nn
import torch.nn.functional as F
import torch
class CNN(nn.Module):
def __init__(self, in_channels, num_action):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=1, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.fc4 = nn.Linear(in_features=2 * 1 * 128, out_features=256)
self.fc5 = nn.Linear(in_features=256, out_features=num_action)
def forward(self, x): # 4 178 144
x = self.conv1(x) # 32 21,17
x = self.conv2(x) # 64 4 3
x = self.conv3(x) # 128 2 1
x = self.fc4(x.view(x.size(0), -1))
x = self.fc5(x)
return x
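if __name__ == "__main__":
    # Hedged sanity check (added for illustration): assumes a batch of 4 stacked
    # 178x144 grayscale frames, matching the shape comments in forward() above.
    net = CNN(in_channels=4, num_action=7)
    dummy = torch.zeros(1, 4, 178, 144)
    print(net(dummy).shape)  # expected: torch.Size([1, 7])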
```
#### File: 1980744819/playing-mario-with-DQN/double_main.py
```python
from env.gym_super_mario_bros import env
from double_dqn.brain import Brain
import numpy as np
from utils.img import get_gif
import PIL.Image as Image
def RGB2gray(obs):
img = Image.fromarray(obs).crop((0, 40, 256, 240)).resize((200, 200))
img = img.convert('L')
return np.asarray(img)
if __name__ == '__main__':
state = env.reset() # 240 256 3
state = RGB2gray(state)
frame_len = 4
memory_size = 1500
brain = Brain(memory_size=memory_size,
input_args=frame_len,
num_actions=7,
shape=state.shape,
learning_rate=0.00025,
reward_decay=0.99,
e_greedy=0.9,
e_greedy_increment=0.001,
e_greedy_start=0,
batch_size=32,
replace_target_iter=10000)
brain.store_start_frame(state)
for i in range(int(memory_size / 3) + 5):
print(i)
action = env.action_space.sample()
obs, re, done, info = env.step(action)
obs = RGB2gray(obs)
env.render()
re /= 15.0
brain.store_transition(action=action, reward=re, obs_=obs)
if done:
env.reset()
step = 1
last_info = env.unwrapped._get_info()
while True:
last_frame = brain.get_last_memory()
# get_gif(last_frame)
action = brain.choose_action(last_frame)
obs_, re, done, info = env.step(action)
if done:
obs_ = env.reset()
obs_ = RGB2gray(obs_)
env.render()
reward = re / 15.0
print(action, reward, brain.epsilon, step)
if reward < -0.6:
print(reward)
print(last_info)
print(info)
brain.store_transition(action=action, reward=reward, obs_=obs_)
last_info = info
if step % 30 == 0:
brain.double_learn()
step += 1
```
#### File: playing-mario-with-DQN/dueling/memory.py
```python
import numpy as np
class SumTree(object):
data_index = 0
def __init__(self, size, frame_len, w, h):
self.data_size = size
self.tree_size = 2 * size - 1
self.tree = np.zeros(self.tree_size,dtype=np.float32)
self.data_obs = np.zeros((size, w, h), dtype=np.uint8)
self.data_reward = np.zeros(size, dtype=np.float32)
self.data_action = np.zeros(size, dtype=np.uint8)
self.frame_len = frame_len
self.num_data = 0
self.data_count = 0
def add(self, tree_point, action, reward, obs_):
tree_index = self.data_index + self.data_size - 1
self.data_action[self.data_index] = action
self.data_reward[self.data_index] = reward
self.data_index = int((self.data_index + 1) % self.data_size)
self.data_obs[self.data_index] = obs_
self.update(tree_index, tree_point)
self.data_count += 1
self.num_data = min(self.data_size, self.data_count)
def update(self, tree_index, pointer):
change = pointer - self.tree[tree_index]
self.tree[tree_index] = pointer
while tree_index != 0:
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += change
@property
def total_weight(self):
return self.tree[0]
def get_leaf(self, value):
parent_index = 0
while True:
left_index = 2 * parent_index + 1
right_index = left_index + 1
if left_index >= self.tree_size:
break
else:
if value <= self.tree[left_index]:
parent_index = left_index
else:
value -= self.tree[left_index]
parent_index = right_index
leaf_index = parent_index
# if leaf_index == 0:
# leaf_index = 1
data_index = leaf_index - (self.data_size - 1)
# data_index_ = (data_index + 1) % self.data_size
obs_frame, obs_frame_ = self.get_frame(data_index)
return leaf_index, self.tree[leaf_index], obs_frame, self.data_action[data_index], \
self.data_reward[data_index], obs_frame_
def store_obs(self, obs):
self.data_obs[self.data_index] = obs
def get_last_frame(self):
start = self.data_index - self.frame_len + 1
end = self.data_index
if start < 0:
start += self.num_data
obs_frame = np.concatenate((self.data_obs[start:self.num_data],
self.data_obs[0:end + 1]))
else:
obs_frame = self.data_obs[start:end + 1]
return obs_frame
def get_frame(self, data_index):
obs_start = data_index - self.frame_len + 1
obs_end = data_index
obs_start_ = int((data_index + 1) % self.num_data)
obs_end_ = obs_start_ + self.frame_len - 1
if obs_start < 0:
obs_start += self.num_data
obs_frame = np.concatenate((self.data_obs[obs_start:self.num_data], self.data_obs[0:obs_end + 1]))
else:
obs_frame = self.data_obs[obs_start:obs_end + 1]
if obs_end_ >= self.num_data:
obs_end_ -= self.num_data
obs_frame_ = np.concatenate((self.data_obs[obs_start_:self.num_data], self.data_obs[0:obs_end_ + 1]))
else:
obs_frame_ = self.data_obs[obs_start_:obs_end_ + 1]
# if obs_frame.shape[0] != self.frame_len or obs_frame_.shape[0] != self.frame_len:
# print('\r --------', obs_start, obs_end, obs_start_, obs_end_)
return obs_frame, obs_frame_
class Memory(object):
epsilon = 0.01
alpha = 0.6
beta = 0.4
beta_increment_per_sampling = 0.001
abs_err_upper = 1
def __init__(self, size, frame_len, w, h):
self.size = size
self.frame_len = frame_len
self.w = w
self.h = h
self.tree = SumTree(size=self.size, frame_len=self.frame_len, w=self.w, h=self.h)
def store_transition(self, action, reward, obs_):
max_leaf_weight = np.max(self.tree.tree[-self.tree.data_size:])
if max_leaf_weight == 0:
max_leaf_weight = self.abs_err_upper
self.tree.add(max_leaf_weight, action, reward, obs_)
def get_memory(self, batch_size):
batch_leaf_index = np.zeros(batch_size, dtype=np.int32)
batch_action = np.zeros(batch_size, dtype=np.uint8)
batch_reward = np.zeros(batch_size, dtype=np.float32)
batch_obs = np.zeros((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)
batch_obs_ = np.zeros((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)
IS_weights = np.zeros((batch_size, 1))
priority_segment = self.tree.total_weight / batch_size
print('total_weight: ', self.tree.total_weight)
self.beta = np.min([1, self.beta + self.beta_increment_per_sampling])
end = self.tree.data_size + self.tree.num_data - 1
min_probability = np.min(self.tree.tree[-self.tree.data_size:end]) / self.tree.total_weight
values = []
leafs = []
leaf_values = []
for i in range(batch_size):
low = priority_segment * i
high = priority_segment * (i + 1)
# print('low: ', low, 'high', high, 'priority_segment:', priority_segment,
# 'total_weight: ', self.tree.total_weight, 'min_probability: ', min_probability, 'end: ', end,
# 'data_size: ',
# self.tree.data_size, 'num_data: ', self.tree.num_data, )
value = np.random.uniform(low, high)
leaf_index, leaf_value, obs, action, reward, obs_ = self.tree.get_leaf(value)
probability = leaf_value / self.tree.total_weight
IS_weights[i, 0] = np.power(probability / min_probability, -self.beta)
batch_leaf_index[i] = leaf_index
values.append(value)
leafs.append(leaf_index)
leaf_values.append(leaf_value)
batch_obs[i] = obs
batch_obs_[i] = obs_
batch_action[i] = action
batch_reward[i] = reward
# print(values)
# print(leafs)
print(leaf_values)
return batch_leaf_index, IS_weights, batch_obs, batch_action, batch_reward, batch_obs_
def batch_update(self, tree_index, abs_errors):
abs_errors += self.epsilon
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for t_index, p in zip(tree_index, ps):
self.tree.update(t_index, p)
def store_frame(self, obs):
self.tree.store_obs(obs)
def get_last_frame(self):
return self.tree.get_last_frame()
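if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): exercise the prioritized replay
    # memory with dummy 84x84 frames; the sizes below are illustrative assumptions.
    mem = Memory(size=8, frame_len=4, w=84, h=84)
    mem.store_frame(np.zeros((84, 84), dtype=np.uint8))
    for step in range(8):
        mem.store_transition(action=0, reward=1.0, obs_=np.zeros((84, 84), dtype=np.uint8))
    idx, weights, obs, actions, rewards, obs_ = mem.get_memory(batch_size=2)
    print(obs.shape, obs_.shape)  # expected: (2, 4, 84, 84) for both
    mem.batch_update(idx, np.abs(np.random.randn(2)))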
```
#### File: playing-mario-with-DQN/dueling/model.py
```python
import torch
import torch.nn as nn
class DuelingCNN(nn.Module):
def __init__(self, in_channels, num_action):
super(DuelingCNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.Advantage_fc4 = nn.Linear(in_features=2 * 2 * 128, out_features=512)
self.Advantage_fc5 = nn.Linear(in_features=512, out_features=num_action)
self.Value_fc4 = nn.Linear(in_features=2 * 2 * 128, out_features=128)
self.Value_fc5 = nn.Linear(in_features=128, out_features=1)
def forward(self, x): # 8 200 200
x = self.conv1(x) # 32 24,24
x = self.conv2(x) # 64 5 5
x = self.conv3(x) # 128 2 2
A = self.Advantage_fc4(x.view(x.size(0), -1))
A = self.Advantage_fc5(A)
V = self.Value_fc4(x.view(x.size(0), -1))
V = self.Value_fc5(V)
A_mean = torch.mean(A, dim=1, keepdim=True)
return V + (A - A_mean)
class CNN(nn.Module):
def __init__(self, in_channels, num_action):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.fc4 = nn.Linear(in_features=2 * 2 * 128, out_features=256)
self.fc5 = nn.Linear(in_features=256, out_features=num_action)
def forward(self, x): # 8 200 200
x = self.conv1(x) # 32 24,24
x = self.conv2(x) # 64 5 5
x = self.conv3(x) # 128 2 2
x = self.fc4(x.view(x.size(0), -1))
x = self.fc5(x)
return x
```
#### File: playing-mario-with-DQN/utils/img.py
```python
import numpy as np
from PIL import Image
def RGB_to_gray(obs):
img = Image.fromarray(obs).crop((0, 40, 256, 240)).resize((200, 200))
img = img.convert('L')
return np.asarray(img)
def get_gif(ims, name):
sequence = []
for item in ims:
sequence.append(Image.fromarray(item))
sequence[0].save(str(name) + '.gif', save_all=True, append_images=sequence[1:])
```
#### File: playing-mario-with-DQN/utils/reward.py
```python
def get_reward(info, last_info):
re = info['coins'] - last_info['coins'] + info['time'] - last_info['time'] + (
info['lives'] - last_info['lives']) * 10 + info['score'] - last_info['score'] + info['xscrollLo'] - \
last_info['xscrollLo'] - 0.1
return re / 1000.0
``` |
{
"source": "19829984/Blender_Make_Target_Bones",
"score": 2
} |
#### File: 19829984/Blender_Make_Target_Bones/ui.py
```python
import bpy
from .operator_make_target_bones import MakeTargetBones
class MakeTargetBonePanel(bpy.types.Panel):
"""Makes panel in mesh edit context in the 3D view side panel"""
bl_label = "Make Target Bone"
bl_idname = "EDIT_PT_make_tgt_bone_panel"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Edit'
bl_context = "armature_edit"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return context.mode in ['EDIT_ARMATURE']
def draw(self, context):
layout = self.layout
row = layout.row()
row.operator(MakeTargetBones.bl_idname, text="Generate Target Bones")
def register():
bpy.utils.register_class(MakeTargetBonePanel)
def unregister():
bpy.utils.unregister_class(MakeTargetBonePanel)
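# Hedged usage note (added for illustration): in a typical multi-file add-on,
# the package __init__.py would import this module and call its hooks, e.g.:
#   from . import ui
#   def register():
#       ui.register()
#   def unregister():
#       ui.unregister()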
``` |
{
"source": "1984shekhar/qpid-dispatch-samples",
"score": 3
} |
#### File: python-qpid-proton/multi-thread-client/sender.py
```python
import logging
import threading
import signal
import os
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
import common
"""
This AMQP sender application, sends one message, with a pre-defined
body and properties sizes, every `interval_delay` (or CLIENT_INTERVAL_SECONDS
env) seconds. After `reconnect_after` (or CLIENT_RECONNECT_AFTER env) messages
have been accepted, the sender will recycle its connection.
When executed, it will spawn multiple threads based on provided `--processes`
(or CLIENT_PROCESSES env).
You can execute it using CLI arguments, or setting environment variables.
Run with `--help` for more info.
"""
# public sender variables (initialized after parsing)
message_body = ""
interrupted = False
class Sender(MessagingHandler, threading.Thread):
def __init__(self, opts):
super(Sender, self).__init__()
threading.Thread.__init__(self)
self.host = opts.host
self.port = opts.port
self.address = opts.address
self.interval_delay = opts.interval_delay
self.reconnect_after = opts.reconnect_after
self.ttl = opts.ttl
self.container = None
self._sender = None
self._url = "amqp://%s:%s/%s" % (self.host, self.port, self.address)
self._next_task = None
# internal stats
self._sent = 0
self._accepted = 0
self._released = 0
self._rejected = 0
def run(self):
while not interrupted:
logging.info("starting sender container")
self.container = Container(self)
self.container.run()
logging.error("sender container stopped [interrupted = %s]" % interrupted)
def interrupt(self):
if self._sender:
logging.info("sender has been interrupted")
self._sender.close()
self.container.stop()
def on_start(self, event):
self.create_sender(event)
def create_sender(self, event):
if self._sender is not None:
logging.info("closing sender")
self._sender.close()
if event.connection:
event.connection.close()
self._reset_stats()
logging.info("creating sender")
self._sender = event.container.create_sender(self._url)
# def on_unhandled(self, method, *args):
# logging.error("on_unhandled - method: %s - args: %s", method, args)
def on_sendable(self, event):
if self._sender.credit > 0 and self._next_task is None:
props = common.generate_application_properties(parsed_opts.properties_size)
msg = Message(id="%s-%d" % (os.getpid(), self._sent + 1),
properties=props,
body=message_body,
ttl=self.ttl,
durable=True)
self._sender.send(msg)
self._sent += 1
if self.interval_delay > 0:
logging.debug("delaying %d secs [credit=%d]" % (self.interval_delay, self._sender.credit))
self._next_task = event.reactor.schedule(self.interval_delay, self)
def on_timer_task(self, event):
logging.debug("tick")
self._next_task = None
self.on_sendable(event)
def on_accepted(self, event):
self._accepted += 1
if self._accepted == self.reconnect_after:
logging.info("recycling sender connection")
self.create_sender(event)
def on_released(self, event):
self._released += 1
logging.warning("message released [%s]" % event.delivery.tag)
def on_rejected(self, event):
self._rejected += 1
logging.warning("message rejected [%s]" % event.delivery.tag)
def _reset_stats(self):
self._sent = 0
self._accepted = 0
self._released = 0
self._rejected = 0
# Main flow for sender app
if __name__ == "__main__":
# parsing arguments
parsed_opts, args = common.parse_opts(True)
# same message body will be used by all sender instances
message_body = common.generate_message_body(parsed_opts.message_size, "abcdedfgijklmnopqrstuvwxyz0123456789")
# initializing logging
common.initialize_logging(parsed_opts.log_level)
# list of spawned sender processes
processes = list()
# Interrupts all running senders
def interrupt_handler(sig, f):
global interrupted
interrupted = True
for sender in processes:
sender.interrupt()
# Capturing SIGINT
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
# spawning sender threads
for i in range(parsed_opts.processes):
s = Sender(parsed_opts)
processes.append(s)
s.start()
# waiting on all threads to finish
for t in processes:
t.join()
print("Exiting")
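# Hedged usage note (added for illustration): the flag names below are assumed to be
# defined by the shared `common.parse_opts` helper, mirroring the attributes used
# above (host, port, address, processes, interval_delay, reconnect_after):
#   python sender.py --host localhost --port 5672 --address examples --processes 2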
``` |
{
"source": "1985312383/contest",
"score": 3
} |
#### File: 1985312383/contest/task1_makeTrainingDataset.py
```python
import csv
import re
import numpy as np
thre = 1.5  # tunable parameter: outlier threshold, in standard deviations
iteration_num = 2  # tunable parameter: number of outlier-removal iterations
def KalmanFilter(z, n_iter=20):
    # Kalman filter
    # assumes A = 1 and H = 1
    # initial parameters
sz = (n_iter,) # size of array
# Q = 1e-5 # process variance
Q = 1e-6 # process variance
# allocate space for arrays
xhat = np.zeros(sz) # a posteri estimate of x
P = np.zeros(sz) # a posteri error estimate
xhatminus = np.zeros(sz) # a priori estimate of x
Pminus = np.zeros(sz) # a priori error estimate
K = np.zeros(sz) # gain or blending factor
R = 0.015 ** 2 # estimate of measurement variance, change to see effect
# intial guesses
xhat[0] = 0.0
P[0] = 1.0
A = 1
H = 1
for k in range(1, n_iter):
# time update
xhatminus[k] = A * xhat[k - 1] # X(k|k-1) = AX(k-1|k-1) + BU(k) + W(k),A=1,BU(k) = 0
Pminus[k] = A * P[k - 1] + Q # P(k|k-1) = AP(k-1|k-1)A' + Q(k) ,A=1
# measurement update
K[k] = Pminus[k] / (Pminus[k] + R) # Kg(k)=P(k|k-1)H'/[HP(k|k-1)H' + R],H=1
xhat[k] = xhatminus[k] + K[k] * (z[k] - H * xhatminus[k]) # X(k|k) = X(k|k-1) + Kg(k)[Z(k) - HX(k|k-1)], H=1
P[k] = (1 - K[k] * H) * Pminus[k] # P(k|k) = (1 - Kg(k)H)P(k|k-1), H=1
return xhat
def data_process(file_path: str):
    with open(file_path, "r") as f:  # open the file
        f.readline()  # skip the header line
        data = f.readlines()  # read the remaining lines
data_num = len(data) / 4
if int(data_num) - data_num < -0.1:
        raise ValueError("Unexpected number of data lines!")
    initial_time = re.search(":.*:([0-9]*)", data[0], flags=0)  # get the initial timestamp
initial_time = int(initial_time.group(1))
Measures = []
for i in range(int(data_num)):
measure = []
for j in range(4):
device = []
anchor = re.search(":[0-9]*?:RR:0:([0-9]):[0-9]*?:([0-9]*?):[0-9]*?:([0-9]*)", data[4 * i + j], flags=0)
            device.extend([int(anchor.group(3)) - initial_time, anchor.group(1), anchor.group(2)])  # timestamp offset, anchor id, measured value
            device = list(map(int, device))
            measure.append(device)  # one measure is the set of four readings from the four anchors
        Measures.append(measure)
    Measures = np.array(Measures)  # Measures is a 3-D array holding all collected measurements
normalized_device_data = []
normalized_device_data_x = []
device_data = []
device_data_x = []
for i in range(4):
device_data.append(Measures[:, i, 2])
device_data_x.append(np.arange(len(Measures[:, i, 2])))
        normalized_device_data.append(device_data[i] / np.max(Measures[:, i, 2]))  # max normalization
normalized_device_data_x = device_data_x
normalized_device_data = np.array(normalized_device_data)
normalized_device_data_x = np.array(normalized_device_data_x)
device_data = np.array(device_data)
device_data_x = np.array(device_data_x)
processed_device_data = np.array(device_data).copy()
device_mean = np.mean(device_data, axis=1)
device_std = np.std(device_data, axis=1)
    low_thre = device_mean - device_std * thre  # lower bound for outlier rejection
    high_thre = device_mean + device_std * thre  # upper bound for outlier rejection
for _ in range(iteration_num):
for i in range(4):
for j in range(len(device_data[i, :])):
if device_data[i, j] < low_thre[i] or device_data[i, j] > high_thre[i]:
processed_device_data[i, j] = device_mean[i]
xhat = []
for i in range(4):
# raw_data = device_data[i]
raw_data = processed_device_data[i]
xhat.append(KalmanFilter(raw_data, n_iter=len(raw_data)))
xhat = np.array(xhat)
    xhat = np.around(xhat, 1)  # round the four filtered series to one decimal place
    return device_data, xhat  # device_data is the raw data; xhat is outlier-cleaned and Kalman-filtered
def save_data(file_path: str, Measures):
with open(file_path, "w+", newline="") as datacsv:
        # `dialect` selects the csv flavour (default "excel"); a delimiter="\t" argument would change the separator used when writing
        csvwriter = csv.writer(datacsv, dialect=("excel"))
        # write one row, putting each item of the list below into its own cell (loop to insert multiple rows)
csvwriter.writerow(["Number", "A0", "A1", "A2", "A3"])
csvwriter.writerows(np.column_stack((np.arange(Measures.shape[1]), Measures.T)), )
def collect_dataset(kind):
for i in range(1, 325):
file_path = f"./data/附件1:UWB数据集/{kind}数据/{i}.{kind}.txt"
original_data, final_processed_data = data_process(file_path)
save_data(f"cleaned_data/{kind}数据/{i}.{kind}.csv", final_processed_data)
def collect_labels():
pass
if __name__ == '__main__':
collect_dataset("正常")
collect_dataset("异常")
```
#### File: 1985312383/contest/task2_getAbnormalModel.py
```python
import numpy as np
from scipy.optimize import root
import pandas as pd
import csv
global d0, d1, d2, d3  # the measured distances from anchors A0, A1, A2, A3
# set up the site dimensions and anchor coordinates
len_x = 5000
len_y = 5000
len_z = 3000
x_mid = len_x/2
y_mid = len_y/2
z_mid = len_z/2
A0_z = 1300
A1_z = 1700
A2_z = 1700
A3_z = 1300
def A0_A1_A2_Positioning(Tag):
x, y, z = Tag[0], Tag[1], Tag[2]
return [
        (x - 0) ** 2 + (y - 0) ** 2 + (z - A0_z) ** 2 - d0 ** 2,  # distance constraint to A0
        (x - len_x) ** 2 + (y - 0) ** 2 + (z - A1_z) ** 2 - d1 ** 2,  # distance constraint to A1
        (x - 0) ** 2 + (y - len_y) ** 2 + (z - A2_z) ** 2 - d2 ** 2,  # distance constraint to A2
]
def A0_A1_A3_Positioning(Tag):
x, y, z = Tag[0], Tag[1], Tag[2]
return [
        (x - 0) ** 2 + (y - 0) ** 2 + (z - A0_z) ** 2 - d0 ** 2,  # distance constraint to A0
        (x - len_x) ** 2 + (y - 0) ** 2 + (z - A1_z) ** 2 - d1 ** 2,  # distance constraint to A1
        (x - len_x) ** 2 + (y - len_y) ** 2 + (z - A3_z) ** 2 - d3 ** 2,  # distance constraint to A3
]
def A0_A2_A3_Positioning(Tag):
x, y, z = Tag[0], Tag[1], Tag[2]
return [
        (x - 0) ** 2 + (y - 0) ** 2 + (z - A0_z) ** 2 - d0 ** 2,  # distance constraint to A0
        (x - 0) ** 2 + (y - len_y) ** 2 + (z - A2_z) ** 2 - d2 ** 2,  # distance constraint to A2
        (x - len_x) ** 2 + (y - len_y) ** 2 + (z - A3_z) ** 2 - d3 ** 2,  # distance constraint to A3
]
def A1_A2_A3_Positioning(Tag):
x, y, z = Tag[0], Tag[1], Tag[2]
return [
        (x - 5000) ** 2 + (y - 0) ** 2 + (z - A1_z) ** 2 - d1 ** 2,  # distance constraint to A1
        (x - 0) ** 2 + (y - len_y) ** 2 + (z - A2_z) ** 2 - d2 ** 2,  # distance constraint to A2
        (x - len_x) ** 2 + (y - len_y) ** 2 + (z - A3_z) ** 2 - d3 ** 2,  # distance constraint to A3
]
def read_predict_data(file_path):
data = pd.read_csv(file_path, encoding="gbk")
data = data.drop(columns=data.columns[0])
lable = data.diff()
lable = lable.drop(index=lable.index[0])
lable = lable.drop(index=lable.index[0])
lable = np.array(lable)
lable = np.absolute(lable)
lable = np.amax(lable, axis=0)
last_distance = data.iloc[-1]
last_distance = np.array(last_distance)
global d0, d1, d2, d3
    d0, d1, d2, d3 = last_distance  # take the A0-A3 distances from the last row
tag = []
if np.sum(lable > 10) == 1:
if lable[0] > 10:
tag.append(root(A1_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)
elif lable[1] > 10:
tag.append(root(A0_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)
elif lable[2] > 10:
tag.append(root(A0_A1_A3_Positioning, [x_mid, y_mid, len_z]).x)
elif lable[3] > 10:
tag.append(root(A0_A1_A2_Positioning, [x_mid, y_mid, len_z]).x)
elif np.sum(lable > 10) > 1:
if lable[0] > 10:
tag.append(root(A1_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)
if lable[1] > 10:
tag.append(root(A0_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)
if lable[2] > 10:
tag.append(root(A0_A1_A3_Positioning, [x_mid, y_mid, len_z]).x)
if lable[3] > 10:
tag.append(root(A0_A1_A2_Positioning, [x_mid, y_mid, len_z]).x)
tag = np.array(tag)
if len(tag) > 1:
for i in range(len(tag) - 1):
if (tag[i][0] > len_x) or (tag[i][0] < 0):
tag = np.delete(tag, obj=i, axis=0)
if len(tag) > 1:
for i in range(len(tag) - 1):
if tag[i][1] > len_y or tag[i][1] < 0:
tag = np.delete(tag, obj=i, axis=0)
if len(tag) > 1:
for i in range(len(tag) - 1):
if tag[i][2] > len_z or tag[i][2] < 0:
tag = np.delete(tag, obj=i, axis=0)
# print(tag)
return np.array(tag)
def clustering_4_tag_position(cluster_tag):
pass
def read_label(file_path):
pass
def tag_check(cluster_tag, predicted_tag, actual_tag):
pass
thre = 1.5 # 要调整的参数,这个是阈值
iteration_num = 2 # 要调整的参数,这个是迭代次数
'''
for _ in range(iteration_num):
for i in range(4):
for j in range(len(device_data[i, :])):
if device_data[i, j] < low_thre[i] or device_data[i, j] > high_thre[i]:
processed_device_data[i, j] = device_mean[i]
'''
def getData(kind):
with open("submit/task2/abnormal_data.csv", "w+", newline="") as datacsv:
        # `dialect` selects the csv flavour (default "excel"); a delimiter="\t" argument would change the separator used when writing
        csvwriter = csv.writer(datacsv, dialect=("excel"))
        # write one row, putting each item of the list below into its own cell (loop to insert multiple rows)
csvwriter.writerow(["Number", "x1", "y1", "z1", "x", "y", "z", "xyz_error", "xy_error", "xz_error", "yz_error", "x_error", "y_error", "z_error"])
        correct_tag_position = pd.read_table("data/附件1:UWB数据集/Tag坐标信息.txt", delim_whitespace=True)  # load the ground-truth tag coordinates
correct_tag_position = np.array(correct_tag_position.drop(columns=correct_tag_position.columns[0]))
for index in range(1, 325):
# data = pd.read_csv(f"cleaned_data/{kind}数据/{i}.{kind}.csv")
# last_line = np.array(data.tail(1))
            cluster_tag = np.array(read_predict_data(f"cleaned_data/{kind}数据/{index}.{kind}.csv"))  # produce 2-4 candidate points for clustering
# cluster_tag_mean = cluster_tag.mean(axis=0)
# cluster_tag_std = cluster_tag.std(axis=0)
            # low_thre = cluster_tag_mean - cluster_tag_std * thre  # remove outliers
            # high_thre = cluster_tag_mean + cluster_tag_std * thre  # remove outliers
# for _ in range(iteration_num):
# for i in range(4):
# for j in range(3):
# if cluster_tag[i, j] < low_thre[j] or cluster_tag[i, j] > high_thre[j]:
# cluster_tag[i, j] = cluster_tag_mean[j]
predicted_tag = np.around(cluster_tag.mean(axis=0) / 10.0, 2)
result = np.append(index, np.append(predicted_tag.T, np.array(correct_tag_position[index - 1])))
result = np.append(result,calculate_3D_error(result[1],result[2],result[3],result[4],result[5],result[6]))
result = np.append(result,calculate_2D_error(result[1],result[2],result[4],result[5]))
result = np.append(result, calculate_2D_error(result[1],result[3],result[4],result[6]))
result = np.append(result, calculate_2D_error(result[2], result[3], result[5], result[6]))
result = np.append(result, calculate_1D_error(result[1],result[4]))
result = np.append(result, calculate_1D_error(result[2], result[5]))
result = np.append(result, calculate_1D_error(result[3], result[6]))
result = np.around(result, 2)
csvwriter.writerow(result)
def test(D0, D1, D2, D3):
global d0, d1, d2, d3
    d0, d1, d2, d3 = D0, D1, D2, D3  # assign the test measurements to A0-A3
tag = []
    tag.append(root(A0_A1_A2_Positioning, [x_mid, y_mid, len_z]).x)  # initial guess: the centre point of the four anchors
    tag.append(root(A0_A1_A3_Positioning, [x_mid, y_mid, len_z]).x)  # initial guess: the centre point of the four anchors
    tag.append(root(A0_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)  # initial guess: the centre point of the four anchors
    tag.append(root(A1_A2_A3_Positioning, [x_mid, y_mid, len_z]).x)  # initial guess: the centre point of the four anchors
tag = np.array(tag)
if len(tag) > 1:
for i in range(len(tag) - 1):
if (tag[i][0] > len_x) or (tag[i][0] < 0):
tag = np.delete(tag, obj=i, axis=0)
if len(tag) > 1:
for i in range(len(tag) - 1):
if tag[i][1] > len_y or tag[i][1] < 0:
tag = np.delete(tag, obj=i, axis=0)
if len(tag) > 1:
for i in range(len(tag) - 1):
if tag[i][2] > len_z or tag[i][2] < 0:
tag = np.delete(tag, obj=i, axis=0)
tag = np.around(tag.mean(axis=0) / 10.0, 2)
# print(tag)
return np.array(tag)
def calculate_3D_error(x1, y1, z1, x, y, z):
return (x1 - x) ** 2 + (y1 - y) ** 2 + (z1 - z) ** 2
def calculate_2D_error(x1, y1, x, y):
return (x1 - x) ** 2 + (y1 - y) ** 2
def calculate_1D_error(x1, x):
return abs(x1-x)
if __name__ == '__main__':
getData("异常")
    # to evaluate a single test sample, comment out getData above, assign d0-d3 at the top of the file, and run test()
    # print(test(1620,3950,2580,4440))
    # compute the average error of each dimension in cm; comment this out when generating the data
# error = pd.read_csv("submit/task2/abnormal_data.csv")
# error = np.array(error)
# average_error = error.mean(axis=0)
# average_error = np.around(average_error, 2)
# for i in range(7, 14):
# print(average_error[i])
breakpoint()
``` |
{
"source": "19857625778/watchlist",
"score": 2
} |
#### File: 19857625778/watchlist/app.py
```python
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'welcome to my watchlist'
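if __name__ == '__main__':
    # Hedged addition: start the Flask development server when run directly.
    app.run(debug=True)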
``` |
{
"source": "1986MMartin/coding-sections-markus",
"score": 2
} |
#### File: python_lessons/freecodecamp_python/003_Calculation.py
```python
def fred():
print("Zap")
def jane():
print("ABC")
jane()
fred()
jane()
``` |
{
"source": "1987539447/start-python",
"score": 3
} |
#### File: start-python/demo/decorator.py
```python
import functools
import time
def log(func):
def wrapper(*args, **kw):
print('call %s() ' % func.__name__)
func(*args, **kw)
return wrapper
@log
def now():
print('2018')
now()
print(now.__name__)
def logger(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s' % (text, func.__name__))
func(*args, **kw)
return wrapper
return decorator
@logger('execute')
def today():
print('2018-02-02')
today()
print(today.__name__)
def metric(fn):
    def wrapper(*args, **kw):
        start = time.time()
        result = fn(*args, **kw)
        print('%s executed in %s ms' % (fn.__name__, (time.time() - start) * 1000))
        return result
    return wrapper
# test
@metric
def fast(x, y):
time.sleep(0.0012)
return x + y
@metric
def slow(x, y, z):
time.sleep(0.1234)
return x * y * z
f = fast(11, 22)
s = slow(11, 22, 33)
if f != 33:
    print('Test failed!')
elif s != 7986:
    print('Test failed!')
```
#### File: start-python/demo/do_dir.py
```python
from datetime import datetime
import os
pwd = os.path.abspath('.')
print(' Size Last Modified Name')
print('---------------------------------------------------------------------')
for x in os.listdir(pwd):
size = os.path.getsize(x)
mtime = datetime.fromtimestamp(os.path.getmtime(x)).strftime('%Y-%m-%d %H:%M')
flag = '/' if os.path.isdir(x) else ''
print('%10d %s %s%s ' % (size, mtime, x, flag))
def find_file(p, target):
for root, dirs, files in os.walk(os.path.abspath(p), topdown=False):
for x in files:
if target in x:
print(x, '---->', root)
target = str(input('Enter a file name...:'))
p = str(input('Enter a directory...:'))
find_file(p,target)
```
#### File: start-python/demo/do_map.py
```python
def f(x):
return x * x
print(list(map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])))
def normalize(name):
    return name[0].upper() + name[1:].lower()
# test:
L1 = ['adam', 'LISA', 'barT']
L2 = list(map(normalize, L1))
print(L2)
```
#### File: start-python/demo/do_queue.py
```python
from multiprocessing import Process,Queue
import os, time, random
# write data into the queue
def write(q):
print('Process to write %s' % os.getpid())
for x in ['A', 'B', 'C', 'D']:
print('put %s to queue....' % x)
q.put(x)
time.sleep(random.random() * 20)
# read data from the queue
def read(q):
print('Process to read %s' % os.getpid())
while True:
r = q.get(True)
print('Get %s from queue' % r)
if __name__ == '__main__':
q = Queue()
pr = Process(target=read, args=(q,))
pw = Process(target=write, args=(q,))
    # start the reader and writer processes
    pw.start()
    pr.start()
    # wait for the writer process to finish
    pw.join()
    # the reader loops forever, so force-terminate it
pr.terminate()
```
#### File: start-python/demo/multi_threading.py
```python
import threading, time
def loop():
print('thread %s is running.....' % threading.current_thread().name)
n = 0
while n < 5:
print('thread %s >>> %s' % (threading.current_thread().name, n))
n = n+1
print('thread %s ended ' % threading.current_thread().name)
print('threading %s is running....' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended' % threading.current_thread().name)
```
#### File: start-python/demo/use_json.py
```python
import json
bob = dict(name='Bob', age=20, score=88)
json_str = json.dumps(bob)
print('json str--', json_str)
reborn = json.loads(json_str)
print(reborn)
class Student(object):
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
def __str__(self):
return 'Student object (%s %s %s)' % (self.name, self.age, self.score)
s = Student('Michel', 21, 99)
stu_data = json.dumps(s, default=lambda obj:obj.__dict__)
print('to json--', stu_data)
rebuild = json.loads(stu_data, object_hook=lambda d: Student(d['name'], d['age'], d['score']))
print(rebuild)
```
#### File: start-python/demo/use_urllib.py
```python
from urllib import request, parse
import json
# get
with request.urlopen('https://api.douban.com/v2/book/2129650') as f:
data = f.read()
print('Status', f.status, f.reason)
for k, v in f.getheaders():
print('%s:%s' % (k, v))
print('Data:', data.decode('utf-8'))
# advance get
req = request.Request('http://www.douban.com/')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
data = f.read()
print('Status', f.status, f.reason)
for k, v in f.getheaders():
print('%s:%s' % (k, v))
print('Data:', data.decode('utf-8'))
# post
# print('login to weibo.cn ....')
# email = input('Email:')
# password = input('Password:')
# login_data = parse.urlencode([
# ('username', email),
# ('password', password),
# ('entry', 'mweibo'),
# ('client_id', ''),
# ('savestate', 1),
# ('ec', ''),
# ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom&jumpfrom=weibocom')
# ])
# req = request.Request('https://passport.weibo.cn/sso/login')
# req.add_header('Original', 'https://passport.weibo.cn')
# req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
# req.add_header('Refer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom')
# with request.urlopen(req, data=login_data.encode('utf-8')) as f:
# data = f.read()
# print('Status', f.status, f.reason)
# for k, v in f.getheaders():
# print('%s:%s' % (k, v))
# print('Data:', data.decode('utf-8'))
# with proxy and proxy auth:
# proxy_handler = request.ProxyHandler({'http': 'http://www.example.com:3128/'})
# proxy_auth_handler = request.ProxyBasicAuthHandler()
# proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
# opener = request.build_opener(proxy_handler, proxy_auth_handler)
# with opener.open('http://www.example.com/login.html') as f:
# pass
def fetch_data(url):
with request.urlopen(url) as f:
data = f.read()
return json.loads(data.decode('utf-8'))
# test
URL = 'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20%3D%202151330&format=json'
data = fetch_data(URL)
print(data)
assert data['query']['results']['channel']['location']['city'] == 'Beijing'
print('ok')
``` |
{
"source": "1989Ryan/Digital-Image-Processing-Project",
"score": 2
} |
#### File: CV_python_toolbox/script/filter.py
```python
import sys
import os
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
lib_path = os.path.abspath(os.path.join(sys.path[0], '..'))
sys.path.append(lib_path)
from src.basic_cv_tool import *
'''This is the script for project No.4 which consists of all the required
assignments.
'''
def gauss_process(imagename):
image_name1 = "../../homework4/project4/"+imagename
result = "../../homework4/result1"+imagename[:5]+".png"
result2 = "../../homework4/result1"+imagename[:5]+"1.png"
tool = basic_cv_tool(image_name1)
img = tool.ImageRead(image_name1)
img1 = tool.GaussFilter(img, 3)
img2 = tool.GaussFilter(img, 5)
img3 = tool.GaussFilter(img, 7)
plt.figure(figsize = (16,5))
p1 = plt.subplot(131)
p1.set_title('gaussfilter, 3x3', fontsize = 11)
p1.imshow(img1,cmap='gray')
p2 = plt.subplot(132)
p2.set_title('gaussfilter, 5x5', fontsize = 11)
p2.imshow(img2,cmap='gray')
p3 = plt.subplot(133)
p3.set_title('gaussfilter, 7x7', fontsize = 11)
p3.imshow(img3,cmap='gray')
plt.savefig(result)
res = np.hstack((img1, img2, img3))
cv2.imwrite(result2,res)
def mid(imagename):
image_name1 = "../../homework4/project4/"+imagename
result = "../../homework4/result2"+imagename[:5]+".png"
result2 = "../../homework4/result2"+imagename[:5]+"1.png"
tool = basic_cv_tool(image_name1)
img = tool.ImageRead(image_name1)
img1 = tool.MediumFilter(img, 3)
img2 = tool.MediumFilter(img, 5)
img3 = tool.MediumFilter(img, 7)
plt.figure(figsize = (16,5))
p1 = plt.subplot(131)
p1.set_title('midfilter, 3x3', fontsize = 11)
p1.imshow(img1,cmap='gray')
p2 = plt.subplot(132)
p2.set_title('midfilter, 5x5', fontsize = 11)
p2.imshow(img2,cmap='gray')
p3 = plt.subplot(133)
p3.set_title('midfilter, 7x7', fontsize = 11)
p3.imshow(img3,cmap='gray')
plt.savefig(result)
res = np.hstack((img1, img2, img3))
cv2.imwrite(result2,res)
def high_pass_filter_process(imagename):
image_name1 = "../../homework4/project4/"+imagename
result = "../../homework4/result2"+imagename[:5]+".png"
result2 = "../../homework4/result3"+imagename[:5]+"1.png"
tool = basic_cv_tool(image_name1)
img = tool.ImageRead(image_name1)
img1 = tool.laplace_filter(img)
img2 = tool.sobel_filter(img)
img3 = tool.unsharp_mask_filter(img, 0.5)
img4 = tool.canny(img)
plt.figure(figsize = (16,4))
p1 = plt.subplot(141)
p1.set_title('laplace', fontsize = 11)
p1.imshow(img1,cmap='gray')
p2 = plt.subplot(142)
p2.set_title('sobel', fontsize = 11)
p2.imshow(img2,cmap='gray')
p3 = plt.subplot(143)
p3.set_title('unsharp', fontsize = 11)
p3.imshow(img3,cmap='gray')
p3 = plt.subplot(144)
p3.set_title('canny', fontsize = 11)
p3.imshow(img4,cmap='gray')
plt.savefig(result)
res = np.hstack((img1, img2, img3, img4))
cv2.imwrite(result2,res)
if __name__ == '__main__':
gauss_process("test1.pgm")
gauss_process("test2.tif")
mid("test1.pgm")
mid("test2.tif")
high_pass_filter_process("test3_corrupt.pgm")
high_pass_filter_process("test4.tif")
``` |
{
"source": "1989Ryan/Fuzzy-Control-Project",
"score": 3
} |
#### File: Fuzzy-Control-Project/src/PID.py
```python
import time
class PID:
def __init__(self, P = 0.2, I = 0, D = 0):
'''
Initialization.
:param P: Proportional Parameter
:param I: integral Parameter
:param D: Derivative Parameter
'''
self.Kp, self.Ki, self.Kd = P, I, D
self.sample_time = 0.0
self.current_time = time.time()
self.last_time = self.current_time
self.clear()
def clear(self):
'''
Clear all parameters.
'''
self.SetPoint = 0.0
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
self.int_error = 0.0
self.windup_guard = 15.0
self.output = 0.0
def update(self, feedback_value):
'''
State Update.
:param feedback_value: Current state value
'''
error = self.SetPoint - feedback_value
self.current_time = time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
if (delta_time >= self.sample_time):
pTerm = self.Kp * error
if (pTerm < -self.windup_guard):
self.PTerm = -self.windup_guard
elif (pTerm > self.windup_guard):
self.PTerm = self.windup_guard
else:
self.PTerm = pTerm
self.ITerm += self.Ki * error * delta_time
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
if delta_time > 0:
self.DTerm = self.Kd * delta_error / delta_time
if (self.DTerm < -self.windup_guard):
self.DTerm = -self.windup_guard
elif (self.DTerm > self.windup_guard):
self.DTerm = self.windup_guard
self.last_time = self.current_time
self.last_error = error
Output = self.PTerm + (self.ITerm) + (self.DTerm)
if Output > 20:
self.output = 20
elif Output < -20:
self.output = -20
else:
self.output = Output
def setKp(self, Proportional_gain):
self.Kp = Proportional_gain
def setKi(self, Integral_gain):
self.Ki = Integral_gain
def setKd(self, derivative_gain):
self.Kd = derivative_gain
def setSampleTime(self, sample_time):
self.sample_time = sample_time
def setSetPoint(self, setpoint):
self.SetPoint = setpoint
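if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): drive a toy first-order
    # plant toward a setpoint of 10 using the controller defined above.
    pid = PID(P=1.2, I=0.5, D=0.0)
    pid.setSetPoint(10.0)
    state = 0.0
    for _ in range(50):
        pid.update(state)
        state += pid.output * 0.05  # simple integrator plant, dt = 0.05
        time.sleep(0.01)
    print("final state:", round(state, 2))  # should approach the setpoint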
``` |
{
"source": "1989Ryan/pyro",
"score": 2
} |
#### File: pyro/ops/integrator.py
```python
import math
from copy import deepcopy
from cProfile import run
from torch.autograd import grad
import pyro.poutine as poutine
import torch
import time
# from tqdm import tqdm
def run_prog(
model,
z,
transforms,
*args,
**kwargs,
):
"""
    Run the probabilistic program to obtain a new `z`
    given the current `z` (the value at each step);
    the trace is reconstructed from the value of `z`.
"""
conditioned_model = poutine.condition(model, data=z)
trace = poutine.trace(conditioned_model).get_trace(*args, **kwargs)
# new_trace = poutine.trace(poutine.replay(model, trace=trace)).get_trace()
new_trace = dict(trace.nodes)
new_z = {site_name: new_trace[site_name]["value"] for site_name in new_trace \
if site_name not in ["_INPUT", "_RETURN", "obs"]}
is_cont = {site_name: new_trace[site_name]["is_cont"] for site_name in new_trace \
if site_name not in ["_INPUT", "_RETURN", "obs"]}
is_cont_vector = torch.tensor([new_trace[site_name]["is_cont"] for site_name in new_trace\
if site_name not in ["_INPUT", "_RETURN", "obs"]])
return new_z, is_cont, is_cont_vector
def velocity_verlet(
z, r, potential_fn, kinetic_grad, step_size, num_steps=1, z_grads=None
):
r"""
Second order symplectic integrator that uses the velocity verlet algorithm.
:param dict z: dictionary of sample site names and their current values
(type :class:`~torch.Tensor`).
:param dict r: dictionary of sample site names and corresponding momenta
(type :class:`~torch.Tensor`).
:param callable potential_fn: function that returns potential energy given z
for each sample site. The negative gradient of the function with respect
to ``z`` determines the rate of change of the corresponding sites'
momenta ``r``.
:param callable kinetic_grad: a function calculating gradient of kinetic energy
w.r.t. momentum variable.
:param float step_size: step size for each time step iteration.
:param int num_steps: number of discrete time steps over which to integrate.
:param torch.Tensor z_grads: optional gradients of potential energy at current ``z``.
:return tuple (z_next, r_next, z_grads, potential_energy): next position and momenta,
together with the potential energy and its gradient w.r.t. ``z_next``.
"""
z_next = z.copy()
r_next = r.copy()
for _ in range(num_steps):
z_next, r_next, z_grads, potential_energy = _single_step_verlet(
z_next, r_next, potential_fn, kinetic_grad, step_size, z_grads
)
return z_next, r_next, z_grads, potential_energy
def _single_step_verlet(z, r, potential_fn, kinetic_grad, step_size, z_grads=None):
r"""
Single step velocity verlet that modifies the `z`, `r` dicts in place.
"""
z_grads = potential_grad(potential_fn, z)[0] if z_grads is None else z_grads
# print(r)
# print(z)
for site_name in r:
r[site_name] = r[site_name] + 0.5 * step_size * (
-z_grads[site_name]
) # r(n+1/2)
r_grads = kinetic_grad(r)
for site_name in z:
z[site_name] = z[site_name] + step_size * r_grads[site_name] # z(n+1)
z_grads, potential_energy = potential_grad(potential_fn, z)
for site_name in r:
r[site_name] = r[site_name] + 0.5 * step_size * (-z_grads[site_name]) # r(n+1)
return z, r, z_grads, potential_energy
def leapfrog_discontiouous(
z, r, is_cont, model, transforms, potential_fn, kinetic_grad, step_size, num_steps=1, z_grads=None
):
r"""
Leapfrog algorithm for discontinuous HMC
"""
# assert len(z) == len(r)
z_next = z.copy()
r_next = r.copy()
z_0 = z.copy()
r_0 = r.copy()
for _ in range(num_steps):
# print("running leapfrog")
# start = time.time()
z_next, r_next, z_grads, potential_energy, r_0, is_cont, _ = _single_step_leapfrog_discontiuous(
z_next, r_next, z_0, r_0, is_cont, transforms, model, potential_fn, kinetic_grad, step_size, z_grads
)
# end = time.time()
# print("single step: {}".format(end-start))
return z_next, r_next, z_grads, potential_energy, r_0, is_cont
def _single_step_leapfrog_discontiuous(z, r, z_0, r_0, is_cont, transforms, model, potential_fn, kinetic_grad, step_size, z_grads=None):
r"""
Single step leapfrog algorithm that modifies the `z` and `r` dicts in place by Laplace momentum
for discontinuous HMC
"""
# update the momentum
# assert len(z) == len(r)
first_start = time.time()
z_grads = potential_grad(potential_fn, z)[0] if z_grads is None else z_grads
for site_name in z_grads:
r[site_name] = r[site_name] + 0.5 * step_size * (
-z_grads[site_name]
) * is_cont[site_name] # r(n+1/2)
# update the variable
for site_name in z:
z[site_name] = z[site_name] + 0.5 * step_size * r[site_name] * is_cont[site_name] # z(n+1)
z, is_cont, is_cont_vector = run_prog(model, z, transforms)
# print(z)
assert len(z) == len(r)
disc_indices = torch.flatten(torch.nonzero(~is_cont_vector.clone(), as_tuple=False))
perm = torch.randperm(len(disc_indices))
disc_indices_permuted = disc_indices[perm]
# assert len(z) == len(r)
# print("finish the first part of leapfrog")
# coord_start = time.time()
for j in disc_indices_permuted:
if j >= len(z):
continue
z, r, is_cont, is_cont_vector, r_0 = _coord_integrator(z, r, z_0, r_0, is_cont, is_cont_vector,
int(j.item()), model, transforms, potential_fn,
kinetic_grad, step_size, z_grads)
# second_start = time.time()
# print("coord integrator time: {}".format(end-start))
# print("finish discontinuous part")
# update the variable
z_grads, potential_energy = potential_grad(potential_fn, z)
# t1 = time.time()
z_ = z.copy()
for site_name in z:
pre_z = z[site_name].clone()
z[site_name] = z[site_name] + 0.5 * step_size * r[site_name] * is_cont[site_name] # r(n+1)
if pre_z != z[site_name]:
print(is_cont[site_name])
# if math.isnan(z[site_name].item()):
# print(is_cont[site_name])
# print(r)
assert z_ == z
z, is_cont, is_cont_vector = run_prog(model, z, transforms)
assert len(z) == len(r)
# update momentum
# t2 = time.time()
if z_ != z:
z_grads, potential_energy = potential_grad(potential_fn, z)
# t3 = time.time()
for site_name in z_grads:
r[site_name] = r[site_name] + 0.5 * step_size * (
-z_grads[site_name]
) * is_cont[site_name] # r(n+1/2)
# print("finish leap frog")
# finish = time.time()
# print(t1-second_start, t3-t2)
# if finish - first_start > 0.1:
# print("first step: {}".format(-first_start + coord_start))
# print("coord step: {}".format(-coord_start + second_start))
# print("secon step: {}".format(-second_start+finish))
return z, r, z_grads, potential_energy, r_0, is_cont, is_cont_vector
def _coord_integrator(z, r, z_0, r_0, is_cont, is_cont_vec, idx, model, transforms, potential_fn, kinetic_grad, step_size, z_grads=None):
r"""
Coordinatewise integrator for dynamics with Laplace momentum for discontinuous HMC
"""
# print("z: {}, r: {}".format(len(z), len(r)))
# print("running")
# print(z)
# z, _, _ = run_prog(model, z, transforms)
# print(z)
assert len(z) == len(r)
U = potential_fn(z)
new_z = z.copy()
site_name = list(new_z.keys())[idx]
new_z[site_name] = new_z[site_name].clone().detach() + step_size * torch.sign(r[site_name])
new_z, new_is_cont, new_is_cont_vec = run_prog(model, new_z, transforms)
new_U = potential_fn(new_z)
delta_U = new_U - U
if not torch.isfinite(new_U) or torch.abs(r[site_name]) <= delta_U:
# print("changing dir")
r[site_name] = -r[site_name]
else:
# print("changing dim")
r[site_name] = r[site_name].clone().detach() - torch.sign(r[site_name].clone().detach()) * delta_U
N2 = len(new_z)
N = len(z)
site_name_list = list(new_z.keys())
old_site_name_list = list(z.keys())
z = new_z.copy()
is_cont = new_is_cont.copy()
is_cont_vec = new_is_cont_vec.clone()
if N2 > N:
unused_site_name_list = [ele for ele in site_name_list if ele not in old_site_name_list]
# start = time.time()
# extend everything to the higher dimension
gauss = torch.distributions.Normal(0, 1).sample([N2-N])
laplace = torch.distributions.Laplace(0, 1).sample([N2-N])
r_padding = gauss * new_is_cont_vec[N:N2] + laplace * ~new_is_cont_vec[N:N2]
for i in range(N2-N):
site_name = unused_site_name_list[i]
r[site_name] = r_padding[i]
r_0[site_name] = r_padding[i]
# end = time.time()
# print("extension time: {}".format(end-start))
else:
# start = time.time()
unused_site_name_list = [ele for ele in old_site_name_list if ele not in site_name_list]
# truncate everything to the lower dimension
for i in range(N-N2):
site_name = unused_site_name_list[i]
r.pop(site_name)
r_0.pop(site_name)
# end = time.time()
# print("truncation time: {}".format(end-start))
assert len(z) == len(r)
assert len(r_0) == len(r)
assert len(is_cont) == len(r)
return z, r, is_cont, is_cont_vec, r_0
def potential_grad(potential_fn, z):
"""
Gradient of `potential_fn` w.r.t. parameters z.
:param potential_fn: python callable that takes in a dictionary of parameters
and returns the potential energy.
:param dict z: dictionary of parameter values keyed by site name.
:return: tuple of `(z_grads, potential_energy)`, where `z_grads` is a dictionary
with the same keys as `z` containing gradients and potential_energy is a
torch scalar.
"""
z_keys, z_nodes = zip(*z.items())
# index = 0
for node in z_nodes:
node.requires_grad_(True)
# print(z_keys[index], z[z_keys[index]])
# index += 1
try:
potential_energy = potential_fn(z)
# deal with singular matrices
except RuntimeError as e:
if "singular U" in str(e) or "input is not positive-definite" in str(e):
grads = {k: v.new_zeros(v.shape) for k, v in z.items()}
return grads, z_nodes[0].new_tensor(float("nan"))
else:
raise e
# print(z_nodes)
if torch.isfinite(potential_energy):
grads = grad(potential_energy, z_nodes, allow_unused=True)
if None in grads:
grads = list(grads)
grads[grads.index(None)] = torch.tensor(0.0)
grads = tuple(grads)
else:
grads = torch.zeros(len(z_nodes))
grad_ret = dict(zip(z_keys, grads))
assert len(grad_ret) == len(z)
return grad_ret, potential_energy.detach()
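# --- Editor's illustrative sketch (not part of the original module) ---
# Shows how ``potential_grad`` can be exercised on a toy quadratic potential.
# ``_toy_potential`` and ``z_example`` are hypothetical names used only here, and the
# sketch assumes the module-level imports of the original file (torch, autograd grad).
def _example_potential_grad_usage():
    import torch
    def _toy_potential(z):
        # U(z) = 0.5 * sum of squares over every site in the dictionary
        return 0.5 * sum((v ** 2).sum() for v in z.values())
    z_example = {"x": torch.tensor([1.0, -2.0]), "y": torch.tensor([0.5])}
    grads, energy = potential_grad(_toy_potential, z_example)
    # ``grads`` has the same keys as ``z_example`` (here grads["x"] == [1., -2.]);
    # ``energy`` is the detached scalar potential energy.
    return grads, energy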
```
{
"source": "1989wanghang/statistics-scripts",
"score": 3
}
#### File: 1989wanghang/statistics-scripts/duration.py
```python
import sys
import plotly.graph_objects as go
import plotly.offline as py
fps = 120
total_log = ''
def sprintf(str_value):
global total_log
total_log += str_value + '\n'
def ReadFile(fichier_html_graphs, file_path):
f = open(file_path, 'r')
total_values = []
x_values = []
x_show_values = []
y_values = []
colors = []
sizes = []
element_num = 0
total_duration = 0
first_value = -1
for line in f.readlines():
line = line.strip()
if not len(line) or line.startswith('#'):
continue
strs = line.split()
values = list(map(int, strs))
element_num = len(values)
if element_num < 2:
print("need two timestamp")
return None, None
diff = values[1] - values[0]
if diff < 0:
sprintf("ignore broken value: [{0} - {1}]".format(
values[1], values[0]))
continue
if first_value == -1:
first_value = values[0]
sprintf("first_value = {0}".format(first_value))
total_values.append(values)
x_values.append(values[0] / 1000)
x_show_values.append("D{0}".format(values[0]))
total_duration += diff
y_values.append(diff / 1000)
colors.append(0)
sizes.append(3)
min_gap = min(y_values)
max_gap = max(y_values)
gap_strs = []
gaps = []
times = []
ratios = []
t = int((max_gap - min_gap) * fps / 1000) + 2
t_index = int(min_gap * fps / 1000)
for i in range(t):
down = t_index * 1000 / fps
up = (t_index + 1) * 1000 / fps
t_index += 1
gap_strs.append('(' + format(down, "0.2f") + ',' + format(up, "0.2f") +
']')
gaps.append([down, up])
times.append(0)
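# Editor's worked example (hedged, assuming fps == 120): each bucket above is one frame
# period wide, 1000 / 120 ~= 8.33 ms, so a measured duration of 20 ms falls into the
# bucket (16.67, 25.00] and increments that bucket's count in the loop below.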
v_index = 0
for v in y_values:
for k in range(len(gaps)):
pair = gaps[k]
if v > pair[0] and v <= pair[1]:  # match the half-open (down, up] buckets
times[k] = times[k] + 1
colors[v_index] = 2 * (k + 1) * (k + 1)
if fps <= 60:
sizes[v_index] = (k + 1) * (k + 1) + 2
else:
sizes[v_index] = k * (k**0.5) + 2
v_index += 1
for tt in times:
ratios.append(tt * 100 / len(y_values))
traces = []
trace_name = file_path.split('/')[-1].split('.')[0]
traces.append(
go.Scatter(
x=x_show_values,
y=y_values,
mode='lines+markers',
marker=dict(
size=sizes,
color=colors,
colorscale='Viridis', # one of plotly colorscales
showscale=True),
name=trace_name))
layout = go.Layout(title=(trace_name + ' duration'),
xaxis=dict(title='current timestamp (D us)'),
yaxis=dict(title='duration (ms)'))
fig = go.Figure(data=traces, layout=layout)
py.plot(fig, filename=trace_name + '_duration0.html', auto_open=False)
fichier_html_graphs.write(" <object data=\"" + trace_name +
'_duration0.html' +
"\" width=\"1600\" height=\"480\"></object>" +
"\n")
sprintf("最小耗时: {0}".format(min_gap))
max_gap_idx = y_values.index(max_gap)
sprintf("最大耗时(idx+1={0}): {1},发生在[{2} - {3}]".format(
max_gap_idx + 1, max_gap, total_values[max_gap_idx][0],
total_values[max_gap_idx][1]))
sprintf("平均帧率: {0}".format(len(y_values) * 1000000 / total_duration))
for i in range(len(times)):
if times[i] > 0:
sprintf("{0}: {1}次,{2}% ".format(gap_strs[i], times[i],
format(ratios[i], "0.2f")))
trace1 = go.Bar(x=gap_strs, y=times, name='次数')
trace2 = go.Scatter(x=gap_strs, y=ratios, name='占比(%)', yaxis='y2')
l = go.Layout(title=(trace_name + ' 区间次数'),
yaxis=dict(title='次数'),
yaxis2=dict(title='占比 %', overlaying='y', side='right'))
data = [trace1, trace2]
fig2 = go.Figure(data=data, layout=l)
py.plot(fig2, filename=trace_name + '_duration_bar.html', auto_open=False)
fichier_html_graphs.write(" <object data=\"" + trace_name +
'_duration_bar.html' +
"\" width=\"650\" height=\"480\"></object>" +
"\n")
textlog = open(trace_name + "_duration_print_log.html", 'w')
textlog.write(
"<html><head></head><body><style>textarea{border-style:none;font-size:16px;width:100%;height:100%;}</style><textarea readonly>\n"
)
textlog.write(total_log)
textlog.write("</textarea></body></html>")
textlog.close()
fichier_html_graphs.write(" <object data=\"" + trace_name +
'_duration_print_log.html' +
"\" width=\"800\" height=\"480\"></object>" +
"\n")
return x_values, y_values
def main():
print('processing duration file:', str(sys.argv[1]))
filepath = sys.argv[1]
trace_name = filepath.split('/')[-1].split('.')[0]
fichier_html_graphs = open(trace_name + "_duration.html", 'w')
fichier_html_graphs.write("<html><head></head><body>" + "\n")
x_values, y_values = ReadFile(fichier_html_graphs, filepath)
fichier_html_graphs.write("</body></html>")
fichier_html_graphs.close()
if __name__ == "__main__":
main()
```
#### File: 1989wanghang/statistics-scripts/trace.py
```python
import sys
import plotly.graph_objects as go
import plotly.offline as py
import numpy as np
min_time = -1
max_time = -1
y = 0
y_gap = 0
def ReadFile(file_path):
global min_time, max_time, y
y -= y_gap
f = open(file_path, 'r')
x_values = []
y_values = []
element_num = 0
tmp_values = []
for line in f.readlines():
line = line.strip()
if not len(line) or line.startswith('#'):
continue
strs = line.split()
values = list(map(int, strs))
if min_time != -1 and values[-1] < min_time:
continue
if max_time != -1 and values[0] > max_time:
continue
if min_time == -1:
min_time = values[0]
if max_time == -1:
tmp_values += values
for i in range(len(values)):
y_values.append(y)
values[i] = values[i] / 1000000
x_values += values
element_num = len(values)
if element_num > 1:
x_values.append(None)
y_values.append(None)
if len(x_values) == 0:
y += y_gap
if max_time == -1:
max_time = max(tmp_values)
return x_values, y_values, element_num
def main():
fichier_html_graphs = open("DASHBOARD.html", 'w')
fichier_html_graphs.write("<html><head></head><body>" + "\n")
#fig = go.Figure()
traces = []
global min_time, max_time, y
y = len(sys.argv) * y_gap
print('argument list:', str(sys.argv))
for i in range(1, len(sys.argv)):
filepath = sys.argv[i]
print('argument %s is: %s' % (i, filepath))
x_values, y_values, element_num = ReadFile(filepath)
if len(x_values) == 0:
continue
trace_name = filepath.split('/')[-1].split('.')[0]
if element_num == 1:
traces.append(
go.Scatter(x=x_values,
y=y_values,
mode='markers',
name=trace_name))
elif element_num > 1:
traces.append(
go.Scatter(x=x_values,
y=y_values,
mode='lines+markers',
name=trace_name))
else:
print('impossible: element_num == 0')
layout = go.Layout(title=('All data'), xaxis=dict(title='timestamp (s)'))
fig = go.Figure(data=traces, layout=layout)
py.plot(fig, filename='large.html', auto_open=False)
fichier_html_graphs.write(" <object data=\"" + 'large.html' +
"\" width=\"1600\" height=\"800\"></object>" +
"\n")
fichier_html_graphs.write("</body></html>")
if __name__ == "__main__":
main()
```
{
"source": "19905386/del_clustering",
"score": 3
}
#### File: del_clustering/experiment/exp_plot.py
```python
from math import ceil
import numpy as np
#initiate offline plotting for plotly
import colorlover as cl
import plotly.offline as offline
import plotly.graph_objs as go
from plotly import subplots  # make_subplots (used below) lives in plotly.subplots
offline.init_notebook_mode(connected=True)
from experiment.experimental_model import readClasses
def plotClassDist(year, class_dir):
"""
This function plots the probability distribution over all the inferred classes for all the AnswerIDs
in a given year.
"""
colors = cl.flipper()['div']['5']['RdGy']
scl = [[0,colors[2]],[0.25,colors[3]],[0.5,colors[4]],[0.75,colors[1]],[1,colors[0]]]
df = readClasses(year, class_dir)
melt = df.reset_index().melt(id_vars='AnswerID')
melt['tixnames'] = melt.apply(lambda x: 'AnswerID: '+ str(x['AnswerID'])+'<br />class: '+ x['variable']+'<br />likelihood: '+"{0:.3f}".format(x['value']), axis=1)
trace = go.Heatmap(z=melt.value,
x=melt.AnswerID,
y=melt.variable,
colorscale = scl,
colorbar=dict(
title='likelihood'),
text=melt['tixnames'],
hoverinfo='text'
)
data=[trace]
layout = go.Layout(title='Probability Distribution of Customer Classes for ' + str(year),
xaxis=dict(
title='household IDs',
type = 'category',
showticklabels=False,
ticks = '',
showline=True,
),
yaxis=dict(
type = 'category',
showline=True,),
margin=go.layout.Margin(
l=175,
r=75,
b=50,
t=100
)
)
return offline.iplot({"data":data, "layout":layout})
def plotClassYearRange(yearstart, yearend, class_dir):
"""
This function creates subplots of the probability distribution over all the inferred classes
for a range of years.
"""
colors = cl.flipper()['div']['5']['RdGy']
scl = [[0,colors[2]],[0.25,colors[3]],[0.5,colors[4]],[0.75,colors[1]],[1,colors[0]]]
ncol = 3
nplots = yearend - yearstart + 1
nrow = int(ceil(nplots/ncol))
fig = subplots.make_subplots(rows=nrow, cols=int(ncol), subplot_titles=list(range(yearstart, yearend + 1)), print_grid=False)
r = 1
c = 1
for y in range(yearstart, yearend + 1):
if c == ncol + 1:
c = 1
ro = int(ceil(r/ncol))
if r == 1: #toggle colorscale
scl_switch=True
else:
scl_switch=False
try:
df = readClasses(y, class_dir)
melt = df.reset_index().melt(id_vars='AnswerID')
melt['tixnames'] = melt.apply(lambda x: 'AnswerID: '+ str(x['AnswerID'])+'<br />class: '+ x['variable']+'<br />likelihood: '+"{0:.3f}".format(x['value']), axis=1)
trace = go.Heatmap(z=melt.value,
x=melt.AnswerID,
y=melt.variable,
text=melt['tixnames'],
hoverinfo='text',
colorscale = scl,
showscale=scl_switch,
colorbar=dict(
title='likelihood',
len=0.5,
yanchor='bottom'))
fig.append_trace(trace, ro, c)
except:
pass
c += 1
r += 1
fig['layout'].update(showlegend=False, title='Probability Distribution of Customer Classes from ' + str(yearstart)+'-'+str(yearend),
height=350+300*(nrow-1),
margin=dict(l=140))
for k in np.arange(1, nplots+1, 3):  # first-column subplot axes (1, 4, 7, ...)
fig['layout'].update({'yaxis{}'.format(k): go.YAxis(type = 'category',
showline=True),
'xaxis{}'.format(k): go.XAxis(#title = 'household IDs',
type = 'category',
showticklabels=False,
ticks = '',
showline=True)
})
for k in np.setdiff1d(np.arange(1, 8),np.arange(1, 8, 3)):
fig['layout'].update({'yaxis{}'.format(k): go.YAxis(showticklabels=False,
ticks = '',
showline=True),
'xaxis{}'.format(k): go.XAxis(#title = 'household IDs',
type = 'category',
showticklabels=False,
ticks = '',
showline=True)
})
return offline.iplot(fig)
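# Editor's illustrative call (hedged; the year range and directory are placeholders):
#   plotClassYearRange(2009, 2014, class_dir='data/class_results')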
```
{
"source": "1990chs/pymapdl-reader",
"score": 2
}
#### File: mapdl/reader/cyclic_reader.py
```python
from functools import wraps
from vtk import vtkMatrix4x4, vtkTransform, vtkAppendFilter
import numpy as np
import pyvista as pv
from ansys.mapdl.reader.common import (STRESS_TYPES, STRAIN_TYPES,
PRINCIPAL_STRESS_TYPES,
THERMAL_STRAIN_TYPES)
from ansys.mapdl.reader.rst import Result, check_comp
from ansys.mapdl.reader import _binary_reader
from ansys.mapdl.reader.common import axis_rotation
np.seterr(divide='ignore', invalid='ignore')
class CyclicResult(Result):
"""Adds cyclic functionality to the result class"""
def __init__(self, filename):
"""Initializes cyclic result"""
super().__init__(filename)
# sanity check
if not self._is_cyclic:
raise TypeError('Result is not a cyclic model')
self._rotor_cache = None
self._has_duplicate_sector = None
self._is_repeated_mode = np.empty(0)
self._repeated_index = np.empty(0)
self._add_cyclic_properties()
def plot_sectors(self, **kwargs):
"""Plot the full rotor and individually color the sectors.
Parameters
----------
kwargs : keyword arguments
Additional keyword arguments. See ``help(pyvista.plot)``
Examples
--------
>>> from ansys.mapdl.reader import examples
>>> rst = examples.download_academic_rotor()
>>> rst.plot_sectors()
Save a screenshot of the sectors
>>> rst.plot_sectors(screenshot='sectors.png')
"""
scalars = np.empty((self.n_sector, self._mas_grid.n_points))
scalars[:] = np.arange(self.n_sector).reshape(-1, 1)
kwargs.setdefault('show_edges', True)
kwargs.setdefault('n_colors', self.n_sector)
return self._plot_cyclic_point_scalars(scalars, None, add_text=False, **kwargs)
def plot(self, **kwargs):
"""Plot the full rotor geometry.
Parameters
----------
kwargs : keyword arguments
Additional keyword arguments. See ``help(pyvista.plot)``
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Examples
--------
>>> from ansys.mapdl.reader import examples
>>> rst = examples.download_academic_rotor()
>>> rst.plot()
Save a screenshot of the rotor
>>> rst.plot(screenshot='rotor.png')
"""
kwargs.setdefault('color', 'w')
kwargs.setdefault('show_edges', True)
return self._plot_cyclic_point_scalars(None, None, add_text=False, **kwargs)
def _add_cyclic_properties(self):
"""Add cyclic properties"""
# identify the sector based on the number of elements in the master sector
cs_els = self._resultheader['csEls']
mask = self.quadgrid.cell_arrays['ansys_elem_num'] <= cs_els
self.master_cell_mask = mask
self._mas_grid = self.grid.extract_cells(mask)
# NOTE: number of nodes in sector may not match number of
# nodes in geometry
node_mask = self._neqv[self._sidx] <= self._resultheader['csNds']
self._mas_ind = np.nonzero(node_mask)[0]
self._dup_ind = np.nonzero(~node_mask)[0]
self._has_duplicate_sector = np.any(self._dup_ind)
# determine repeated modes
mask_a = np.isclose(self.time_values, np.roll(self.time_values, 1))
mask_b = np.isclose(self.time_values, np.roll(self.time_values, -1))
self._is_repeated_mode = np.logical_or(mask_a, mask_b)
# edge case for single pair of repeated modes
if self._is_repeated_mode.size == 2 and self._is_repeated_mode.all():
self._repeated_index = np.array([1, 0])
return
elif self._is_repeated_mode.size == 1: # edge case single result
self._is_repeated_mode = np.array([False])
return
self._repeated_index = np.empty(self._is_repeated_mode.size, int)
self._repeated_index[:] = -1
if np.any(self._is_repeated_mode):
self._repeated_index[mask_a] = np.nonzero(mask_b)[0]
self._repeated_index[mask_b] = np.nonzero(mask_a)[0]
def nodal_solution(self, rnum, phase=0, full_rotor=False, as_complex=False):
"""Returns the DOF solution for each node in the global
cartesian coordinate system.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float, optional
Phase to rotate sector result in radians.
full_rotor : bool, optional
Expands the single sector solution for the full rotor.
Sectors are rotated counter-clockwise about the axis of
rotation. Default False.
as_complex : bool, optional
Returns result as a complex number, otherwise as the real
part rotated by phase. Default False.
Returns
-------
nnum : numpy.ndarray
Node numbers of master sector.
result : numpy.ndarray
Result is (nnod x numdof), nnod is the number of nodes in
a sector and numdof is the number of degrees of freedom.
When full_rotor is True the array will be (nSector x nnod
x numdof).
Examples
--------
Visualize the 1st nodal diameter mode.
>>> from ansys.mapdl import reader as pymapdl_reader
>>> from ansys.mapdl.reader import examples
>>> result = examples.download_academic_rotor()
>>> result.nodal_solution((2, 1))
Same result but uses Python (zero based) cumulative indexing
>>> result.nodal_solution(2)
Notes
-----
Somewhere between v15.0 and v18.2 ANSYS stopped writing the
duplicate sector to the result file and instead records results in
pairs (i.e. harmonic index 1, -1).
"""
func = super().nodal_solution
return self._get_full_result(rnum, func, phase, full_rotor, as_complex,
tensor=False)
@wraps(nodal_solution)
def nodal_displacement(self, *args, **kwargs):
"""wraps nodal_solution"""
return self.nodal_solution(*args, **kwargs)
def _expand_cyclic_static(self, result, tensor=False, stress=True):
"""Expand cyclic static result for a full rotor"""
cs_cord = self._resultheader['csCord']
if cs_cord > 1:
matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix.Invert()
else:
matrix = vtkMatrix4x4()
i_matrix = vtkMatrix4x4()
shp = (self.n_sector, result.shape[0], result.shape[1])
full_result = np.empty(shp)
full_result[:] = result
rang = 360.0 / self.n_sector
for i in range(1, self.n_sector):
# transform to standard position, rotate about Z axis,
# transform back
transform = vtkTransform()
transform.RotateZ(rang*i)
transform.Update()
rot_matrix = transform.GetMatrix()
if cs_cord > 1:
temp_matrix = vtkMatrix4x4()
rot_matrix.Multiply4x4(i_matrix, rot_matrix, temp_matrix)
rot_matrix.Multiply4x4(temp_matrix, matrix, rot_matrix)
trans = pv.array_from_vtkmatrix(rot_matrix)
if tensor:
if stress:
_binary_reader.tensor_arbitrary(full_result[i], trans)
else:
_binary_reader.tensor_strain_arbitrary(full_result[i], trans)
else:
_binary_reader.affline_transform(full_result[i], trans)
return full_result
def _expand_cyclic_modal(self, result, result_r, hindex, phase, as_complex,
full_rotor):
""" Combines repeated results from ANSYS """
if as_complex or full_rotor:
# Matches ansys direction
# if self._positive_cyclic_dir:
result_combined = result + result_r*1j
# else:
# result_combined = result - result_r*1j
if phase:
result_combined *= 1*np.cos(phase) - 1j*np.sin(phase)
else: # convert to real
result_combined = result*np.cos(phase) - result_r*np.sin(phase)
# just return single sector
if not full_rotor:
return result_combined
# Generate full rotor solution
result_expanded = []
angles = np.linspace(0, 2*np.pi, self.n_sector + 1)[:-1] + phase
for angle in angles:
# need to rotate solution and rotate direction
result_expanded.append(axis_rotation(result_combined,
angle, deg=False,
axis='z'))
result_expanded = np.asarray(result_expanded, dtype=np.complex128)
# ANSYS scales the result
if hindex == 0 or hindex == self.n_sector/2:
result_expanded /= self.n_sector**0.5
else:
result_expanded /= (self.n_sector/2)**0.5
# adjust phase of the full result based on the harmonic index
f_arr = np.zeros(self.n_sector)
f_arr[hindex] = 1
jang = np.fft.ifft(f_arr)[:self.n_sector]*self.n_sector
cjang = jang * (np.cos(phase) - np.sin(phase) * 1j) # 14-233
result_expanded *= cjang.reshape(-1, 1, 1)
if as_complex:
return result_expanded
else:
return np.real(result_expanded)
def _expand_cyclic_modal_tensor(self, result, result_r, hindex,
phase, as_complex, full_rotor, stress=True):
"""Combines repeated results from ANSYS and optionally
duplicates/rotates it around the axis of rotation"""
# must scale value
if as_complex or full_rotor:
# if self._positive_cyclic_dir:
result_combined = result + result_r*1j
# else:
# result_combined = result - result_r*1j
if phase:
result_combined *= 1*np.cos(phase) - 1j*np.sin(phase)
else: # convert to real
result_combined = result*np.cos(phase) - result_r*np.sin(phase)
# just return single sector
if not full_rotor:
return result_combined
# Generate full rotor solution
shp = (self.n_sector, result.shape[0], result.shape[1])
result_expanded = np.empty(shp,np.complex128)
result_expanded[:] = result_combined
# convert hindex to nodal content
f_arr = np.zeros(self.n_sector)
f_arr[hindex] = self.n_sector
jang = np.fft.ifft(f_arr)[:self.n_sector]
# scaling must be adjusted to expand to the full rotor
if hindex == 0 or hindex == self.n_sector/2:
jang *= self.n_sector**(-1/2)
else:
jang *= 2*(2*self.n_sector)**(-1/2)
cjang = jang * (np.cos(phase) - np.sin(phase) * 1j)
full_result = np.empty(shp)
full_result[:] = np.real(result_expanded*cjang.reshape(-1, 1, 1))
cs_cord = self._resultheader['csCord']
if cs_cord > 1:
matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix.Invert()
else:
matrix = vtkMatrix4x4()
i_matrix = vtkMatrix4x4()
rang = 360.0 / self.n_sector
for i in range(1, self.n_sector):
# transform to standard position, rotate about Z axis,
# transform back
transform = vtkTransform()
transform.RotateZ(rang*i)
transform.Update()
rot_matrix = transform.GetMatrix()
if cs_cord > 1:
temp_matrix = vtkMatrix4x4()
rot_matrix.Multiply4x4(i_matrix, rot_matrix, temp_matrix)
rot_matrix.Multiply4x4(temp_matrix, matrix, rot_matrix)
trans = pv.array_from_vtkmatrix(rot_matrix)
if stress:
_binary_reader.tensor_arbitrary(full_result[i], trans)
else:
_binary_reader.tensor_strain_arbitrary(full_result[i], trans)
return full_result
def harmonic_index_to_cumulative(self, hindex, mode):
"""Converts a harmonic index and a 0 index mode number to a
cumulative result index.
Harmonic indices are stored as positive and negative pairs for
modes other than 0 and N/nsectors.
Parameters
----------
hindex : int
Harmonic index. Must be less than or equal to nsectors/2.
May be positive or negative
mode : int
Mode number. 0 based indexing. Access mode pairs by with
a negative/positive harmonic index.
Returns
-------
rnum : int
Cumulative index number. Zero based indexing.
"""
hindex_table = self._resultheader['hindex']
if not np.any(abs(hindex) == np.abs(hindex_table)):
raise Exception('Invalid harmonic index.\n' +
'Available indices: %s' % np.unique(hindex_table))
mask = np.logical_and(hindex == hindex_table,
mode == self.mode_table)
if not mask.any():
mode_mask = abs(hindex) == np.abs(hindex_table)
avail_modes = np.unique(self.mode_table[mode_mask])
raise Exception('Invalid mode for harmonic index %d\n' % hindex +
'Available modes: %s' % avail_modes)
index = mask.nonzero()[0]
assert index.size == 1, 'Multiple cumulative index matches'
return index[0]
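# Editor's illustrative usage (hedged; ``rst`` stands for a loaded CyclicResult and
# the harmonic index / mode values are placeholders):
#   rnum = rst.harmonic_index_to_cumulative(hindex=1, mode=0)
#   nnum, disp = rst.nodal_solution(rnum, full_rotor=True)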
@property
def mode_table(self):
"""Unique modes for cyclic results"""
hindex_table = self._resultheader['hindex']
diff = np.diff(np.abs(hindex_table))
freqs = self.time_values
mode_table = [0]
c = 0
for i in range(1, freqs.size):
if diff[i - 1]:
c = 0
mode_table.append(c)
elif np.isclose(freqs[i], freqs[i - 1]):
mode_table.append(c)
else:
c += 1
mode_table.append(c)
return np.asarray(mode_table)
@property
def harmonic_indices(self):
"""Harmonic indices of the result file.
Harmonic index is simply the Nodal Diameter of the mode. This
is defined as the number of complete sine waves that pass
through the circumference.
Examples
--------
>>> rst.harmonic_indices
array([ 0, 0, 0, 0, 0, 0, -1, 1, -1, 1, 1, -1,
-2, 2, -2, 2, -2, 2, 3, 3, 3, 3, 3, 3], dtype=int32)
"""
return self._resultheader['hindex']
def nodal_stress(self, rnum, phase=0, as_complex=False, full_rotor=False):
"""Retrieves the component stresses for each node in the
solution.
The order of the results corresponds to the sorted node
numbering.
Computes the nodal stress by averaging the stress for each
element at each node. Due to the discontinuities across
elements, stresses will vary based on the element they are
evaluated from.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
as_complex : bool, optional
Reports stress as a complex result. Real and imaginary
stresses correspond to the stress of the main and repeated
sector. Stress can be "rotated" using the phase
parameter.
full_rotor : bool, optional
Expands the results to the full rotor when True. Default
False.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
stress : numpy.ndarray
Stresses at Sx Sy Sz Sxy Syz Sxz averaged at each corner
node. For the corresponding node numbers, see where
result is the result object.
Examples
--------
>>> nnum, stress = rst.nodal_stress(0)
Notes
-----
Nodes without a stress value will be NAN.
"""
func = super().nodal_stress
return self._get_full_result(rnum, func, phase, full_rotor, as_complex,
tensor=True, stress=True)
def _get_full_result(self, rnum, func, phase, full_rotor, as_complex,
tensor=True, stress=False):
"""Return the full rotor result or the complex result for a cyclic model.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
tensor : bool
True when the result is a stress/strain tensor. False
when a scalar or displacement value.
stress : bool
True when tensor is a stress. False when tensor is a
strain. Ignored when not a tensor.
"""
rnum = self.parse_step_substep(rnum)
nnum, full_result = func(rnum)
# full result may or may not contain the duplicate sector
if self._has_duplicate_sector:
result = full_result[self._mas_ind]
nnum = nnum[self._mas_ind]
else:
result = full_result
if self._resultheader['kan'] == 0: # static result
if full_rotor:
expanded_result = self._expand_cyclic_static(result,
tensor=tensor,
stress=stress)
else:
return nnum, result
elif self._resultheader['kan'] == 2: # modal analysis
# combine modal solution results
hindex_table = self._resultheader['hindex']
hindex = hindex_table[rnum] # move this to expand_modal_tensor
result_r = self._get_complex_result(func, rnum, result)
if tensor:
expanded_result = self._expand_cyclic_modal_tensor(result,
result_r,
hindex,
phase,
as_complex,
full_rotor,
stress=stress)
else:
assert result.shape == result_r.shape
expanded_result = self._expand_cyclic_modal(result,
result_r,
hindex,
phase,
as_complex,
full_rotor)
else:
raise RuntimeError('Unsupported analysis type')
return nnum, expanded_result
def _get_complex_result(self, func, rnum, full_result):
"""Acquire the duplicate sector or repeated result.
Depending on the version of MAPDL, this may mean using the
result from the duplicate sector or the mode pair when there
are duplicate modes.
When there is no repeated mode or duplicate sector, returns an
all zero array.
Parameters
----------
func : function
Function to acquire the sector only result.
rnum : int
Cumulative result number.
full_result : np.ndarray
Full result (may include duplicate sector).
Returns
--------
result_r : np.ndarray
Repeated result
"""
has_dup_result = False
if self._has_duplicate_sector:
has_dup_result = self._dup_ind[-1] <= full_result.shape[0] - 1
if self._is_repeated_mode[rnum]: # get mode pair result
_, result_r = func(self._repeated_index[rnum])
if result_r.shape[0] != self._mas_ind.size:
result_r = result_r[self._mas_ind]
elif has_dup_result: # use the duplicate sector
result_r = full_result[self._dup_ind]
else: # otherwise, a standing wave (no complex component)
result_r = np.zeros((self._mas_ind.size, full_result.shape[1]),
dtype=full_result.dtype)
return result_r
def nodal_thermal_strain(self, rnum, phase=0, as_complex=False, full_rotor=False):
"""Nodal component thermal strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV, and eswell
(element swelling strain). Thermal strains are always values
at the integration points moved to the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
as_complex : bool, optional
Reports stress as a complex result. Real and imaginary
stresses correspond to the stress of the main and repeated
sector. Stress can be "rotated" using the phase
parameter.
full_rotor : bool, optional
Expands the results to the full rotor when True. Default
False.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
thermal_strain : np.ndarray
Nodal component thermal strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV, ESWELL
Examples
--------
Load the nodal thermal strain for the first result.
>>> nnum, thermal_strain = rst.nodal_thermal_strain(0)
Notes
-----
Nodes without a strain will be NAN.
"""
func = super().nodal_thermal_strain
return self._get_full_result(rnum, func, phase, full_rotor, as_complex,
tensor=True, stress=False)
def plot_nodal_thermal_strain(self, rnum,
comp=None,
phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1,
node_components=None,
element_components=None,
sel_type_all=True,
add_text=True,
overlay_wireframe=False,
treat_nan_as_zero=True,
**kwargs):
"""Plot nodal thermal strain.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Thermal strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
- ``"EQV"``
- ``"ESWELL"`` (element swelling strain)
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0.
full_rotor : bool, optional
Expand the sector solution to the full rotor.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot nodal thermal strain for an academic rotor
>>> rst.plot_nodal_thermal_strain(0)
"""
if not full_rotor:
return super().plot_nodal_thermal_strain(rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
idx = check_comp(THERMAL_STRAIN_TYPES, comp)
_, strain = self.nodal_thermal_strain(rnum, phase, False, True)
scalars = strain[:, :, idx]
kwargs.setdefault('scalar_bar_args', {'title': f'{comp} Nodal Thermal Strain'})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def nodal_elastic_strain(self, rnum, phase=0, as_complex=False,
full_rotor=False):
"""Nodal component elastic strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV.
Elastic strains can be nodal values extrapolated from
the integration points or values at the integration points
moved to the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
as_complex : bool, optional
Reports stress as a complex result. Real and imaginary
stresses correspond to the stress of the main and repeated
sector. Stress can be "rotated" using the phase
parameter.
full_rotor : bool, optional
Expands the results to the full rotor when True. Default
False.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
elastic_strain : numpy.ndarray
Nodal component elastic strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV.
Examples
--------
Load the nodal elastic strain for the first result.
>>> nnum, elastic_strain = rst.nodal_elastic_strain(0)
Notes
-----
Nodes without a strain will be NAN.
"""
func = super().nodal_elastic_strain
return self._get_full_result(rnum, func, phase, full_rotor, as_complex,
tensor=True, stress=False)
def plot_nodal_elastic_strain(self, rnum,
comp=None, phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1,
node_components=None,
element_components=None,
sel_type_all=True,
add_text=True,
overlay_wireframe=False,
treat_nan_as_zero=True,
**kwargs):
"""Plot nodal elastic strain.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Elastic strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
full_rotor : bool, optional
Expand the sector solution to the full rotor.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot nodal elastic strain for an academic rotor.
>>> result.plot_nodal_elastic_strain(0, 'X')
"""
idx = check_comp(STRAIN_TYPES[:-1], comp)
_, strain = self.nodal_elastic_strain(rnum, phase, False, full_rotor)
scalars = strain[:, :, idx]
kwargs.setdefault('scalar_bar_args', {'title': f'{comp} Nodal Elastic Strain'})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def nodal_plastic_strain(self, rnum, phase=0, as_complex=False,
full_rotor=False):
"""Nodal component plastic strains. This record contains
strains in the order X, Y, Z, XY, YZ, XZ, EQV.
Plastic strains can be nodal values extrapolated from
the integration points or values at the integration points
moved to the nodes.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
as_complex : bool, optional
Reports stress as a complex result. Real and imaginary
stresses correspond to the stress of the main and repeated
sector. Stress can be "rotated" using the phase
parameter.
full_rotor : bool, optional
Expands the results to the full rotor when True. Default
False.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
plastic_strain : numpy.ndarray
Nodal component plastic strains. Array is in the order
X, Y, Z, XY, YZ, XZ, EQV.
Examples
--------
Load the nodal plastic strain for the first result.
>>> nnum, plastic_strain = rst.nodal_plastic_strain(0)
Notes
-----
Nodes without a strain will be NAN.
"""
func = super().nodal_plastic_strain
return self._get_full_result(rnum, func, phase, full_rotor, as_complex,
tensor=True, stress=False)
def plot_nodal_plastic_strain(self, rnum,
comp=None, phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1,
node_components=None,
element_components=None,
sel_type_all=True,
add_text=True,
overlay_wireframe=False,
treat_nan_as_zero=True,
**kwargs):
"""Plot nodal plastic strain.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Plastic strain component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
full_rotor : bool, optional
Expand the sector solution to the full rotor.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot nodal plastic strain for an academic rotor
>>> result.plot_nodal_plastic_strain(0)
"""
idx = check_comp(STRAIN_TYPES[:-1], comp)
_, strain = self.nodal_plastic_strain(rnum, phase, False, full_rotor)
scalars = strain[:, :, idx]
kwargs.setdefault('scalar_bar_args', {'title': f'{comp} Nodal Plastic Strain'})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def principal_nodal_stress(self, rnum, phase=0, as_complex=False,
full_rotor=False):
"""Computes the principal component stresses for each node in
the solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
phase : float
Phase adjustment of the stress in radians.
as_complex : bool, optional
Returns result as a complex number, otherwise as the real
part rotated by phase. Default False.
full_rotor : bool, optional
Expand sector solution to full rotor.
Returns
-------
nodenum : numpy.ndarray
Node numbers of the result.
pstress : numpy.ndarray
Principal stresses, stress intensity, and equivalent stress.
[sigma1, sigma2, sigma3, sint, seqv]
Notes
-----
ANSYS equivalent of:
PRNSOL, S, PRIN
which returns:
S1, S2, S3 principal stresses, SINT stress intensity, and SEQV
equivalent stress.
"""
if as_complex and full_rotor:
raise ValueError('complex and full_rotor cannot both be True')
# get component stress
nnum, stress = self.nodal_stress(rnum, phase, as_complex, full_rotor)
# compute principle stress
if as_complex:
stress_r = np.imag(stress)
stress = np.real(stress)
pstress, isnan = _binary_reader.compute_principal_stress(stress)
pstress[isnan] = np.nan
pstress_r, isnan = _binary_reader.compute_principal_stress(stress_r)
pstress_r[isnan] = np.nan
return nnum, pstress + 1j*pstress_r
elif full_rotor:
# compute principle stress for each sector
pstress = np.empty((self.n_sector, stress.shape[1], 5), np.float64)
for i in range(stress.shape[0]):
pstress[i], isnan = _binary_reader.compute_principal_stress(stress[i])
pstress[i, isnan] = np.nan
return nnum, pstress
else:
pstress, isnan = _binary_reader.compute_principal_stress(stress)
pstress[isnan] = np.nan
return nnum, pstress
def plot_nodal_solution(self, rnum, comp='norm',
phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1.0,
node_components=None,
element_components=None,
overlay_wireframe=False,
add_text=True,
sel_type_all=True,
treat_nan_as_zero=True,
**kwargs):
"""Plot the nodal solution (generally displacement).
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Display component to display. Options are 'x', 'y', 'z',
and 'norm', corresponding to the x direction, y direction,
z direction, and the normalized direction:
``(x**2 + y**2 + z**2)**0.5``
full_rotor : bool, optional
Expand sector solution to full rotor.
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot the displacement of the first cyclic result.
>>> result.plot_nodal_solution(0)
"""
# Load result from file
if not full_rotor:
return super().plot_nodal_solution(rnum,
comp,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
node_components=node_components,
element_components=element_components,
sel_type_all=sel_type_all,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
rnum = self.parse_step_substep(rnum)
_, result = self.nodal_solution(rnum, phase, full_rotor, as_complex=False)
# Process result
label = 'Cyclic Rotor\nDisplacement'
if comp == 'x':
scalars = result[:, :, 0]
title = 'X {:s}\n'.format(label)
elif comp == 'y':
scalars = result[:, :, 1]
title = 'Y {:s}\n'.format(label)
elif comp == 'z':
scalars = result[:, :, 2]
title = 'Z {:s}\n'.format(label)
else:
# Normalize displacement
scalars = (result*result).sum(2)**0.5
title = 'Normalized\n%s\n' % label
kwargs.setdefault('scalar_bar_args', {'title': title})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def plot_nodal_stress(self, rnum,
comp=None,
phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1,
node_components=None,
element_components=None,
overlay_wireframe=False,
add_text=True,
sel_type_all=True,
treat_nan_as_zero=True,
**kwargs):
"""Plot nodal stress of a given component
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Stress component to display. Available options:
- ``"X"``
- ``"Y"``
- ``"Z"``
- ``"XY"``
- ``"YZ"``
- ``"XZ"``
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
full_rotor : bool, optional
Expand the sector solution to the full rotor.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot the ``"Z"`` nodal stress of the first cyclic result.
>>> result.plot_nodal_stress(0, comp="Z")
"""
if not full_rotor:
return super().plot_nodal_stress(rnum, comp,
show_displacement,
displacement_factor,
node_components,
element_components,
sel_type_all,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
idx = check_comp(STRESS_TYPES, comp)
_, stress = self.nodal_stress(rnum, phase, False, full_rotor=True)
scalars = stress[:, :, idx]
kwargs.setdefault('scalar_bar_args',
{'title': f'Cyclic Rotor\nNodal Stress\n{comp}\n'})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def plot_principal_nodal_stress(self, rnum,
comp=None,
phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1,
node_components=None,
element_components=None,
sel_type_all=True,
add_text=True,
overlay_wireframe=False,
treat_nan_as_zero=True,
**kwargs):
"""Plot the nodal principal stress.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : string
Stress component to plot. S1, S2, S3 principal stresses, SINT
stress intensity, and SEQV equivalent stress.
Stress type must be a string from the following list:
``['S1', 'S2', 'S3', 'SINT', 'SEQV']``
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
full_rotor : bool, optional
Expand sector solution to full rotor.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
kwargs : keyword arguments
Additional keyword arguments. See ``help(pyvista.plot)``
Returns
-------
cpos : list
VTK camera position.
Examples
--------
Plot the von Mises stress of the first cyclic result.
>>> result.plot_principal_nodal_stress(0, comp='SEQV')
"""
if not full_rotor:
return super().plot_principal_nodal_stress(rnum, comp,
show_displacement,
displacement_factor,
node_components,
element_components,
sel_type_all,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
# get the correct component of the principal stress for the rotor
idx = check_comp(PRINCIPAL_STRESS_TYPES, comp)
_, pstress = self.principal_nodal_stress(rnum, phase, full_rotor=True)
scalars = pstress[:, :, idx]
kwargs.setdefault('scalar_bar_args',
{'title': f'Cyclic Rotor\nPrincipal Nodal Stress\n{comp}\n'})
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(scalars,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def nodal_temperature(self, rnum, full_rotor=False):
"""Retrieves the temperature for each node in the solution.
The order of the results corresponds to the sorted node
numbering.
Equivalent MAPDL commands:
PRNSOL, TEMP
PRNSOL, BFE
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
full_rotor : bool, optional
Expand sector solution to full rotor.
Returns
-------
nnum : numpy.ndarray
Node numbers of the result.
temperature : numpy.ndarray
Temperature at each node.
Examples
--------
>>> nnum, stress = rst.nodal_temperature(0)
"""
nnum, temp = super()._nodal_result(rnum, 'EPT')
nnum = nnum[self._mas_ind]
temp = temp[self._mas_ind]
if not full_rotor: # return only the master sector
return nnum, temp.ravel()
# otherwise, duplicate and repeat as temperature is constant across sectors
return nnum, temp.T.repeat(self.n_sector, axis=0)
def plot_nodal_temperature(self, rnum,
phase=0,
full_rotor=True,
show_displacement=False,
displacement_factor=1.0,
node_components=None,
overlay_wireframe=False,
add_text=True,
element_components=None,
sel_type_all=True,
treat_nan_as_zero=True,
**kwargs):
"""Plot the nodal temperature.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
full_rotor : bool, optional
Expand the sector solution and plot the full rotor.
phase : float, optional
Phase angle of the modal result in radians. Only valid
when full_rotor is True. Default 0
node_components : list, optional
Accepts either a string or a list strings of node
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
element_components : list, optional
Accepts either a string or a list strings of element
components to plot. For example:
``['MY_COMPONENT', 'MY_OTHER_COMPONENT']``
sel_type_all : bool, optional
If node_components is specified, plots those elements
containing all nodes of the component. Default ``True``.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
Returns
-------
cpos : list
Camera position from vtk render window.
Examples
--------
Plot the nodal temperature of a rotor for the first result.
>>> result.plot_nodal_temperature(0)
"""
# Load result from file
if not full_rotor:
return super().plot_nodal_temperature(rnum,
show_displacement=show_displacement,
displacement_factor=displacement_factor,
node_components=node_components,
element_components=element_components,
sel_type_all=sel_type_all,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
_, temp = self.nodal_temperature(rnum, True)
kwargs['node_components'] = node_components
kwargs['element_components'] = element_components
kwargs['show_displacement'] = show_displacement
kwargs['displacement_factor'] = displacement_factor
kwargs['overlay_wireframe'] = overlay_wireframe
kwargs['add_text'] = add_text
kwargs['sel_type_all'] = sel_type_all
kwargs['phase'] = phase
return self._plot_cyclic_point_scalars(temp,
rnum,
treat_nan_as_zero=treat_nan_as_zero,
**kwargs)
def animate_nodal_solution(self, rnum, comp='norm',
displacement_factor=0.1,
nangles=180,
add_text=True, loop=True,
movie_filename=None,
**kwargs):
"""Animate nodal solution. Assumes nodal solution is a
displacement array from a modal solution.
Parameters
----------
rnum : int or list
Cumulative result number with zero based indexing, or a
list containing (step, substep) of the requested result.
comp : str, optional
Component of displacement to display. Options are 'x',
'y', 'z', or 'norm', which correspond to the x, y, z, or
the normalized direction ``(x**2 + y**2 + z**2)**0.5``
displacement_factor : float, optional
Increases or decreases displacement by a factor.
nangles : int, optional
Number of "frames" between each full cycle.
show_phase : bool, optional
Shows the phase at each frame.
add_text : bool, optional
Includes result information at the bottom left-hand corner
of the plot.
interpolate_before_map : bool, optional
Leaving this at default generally results in a better plot.
movie_filename : str, optional
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
A single loop of the mode will be recorded.
kwargs : optional keyword arguments, optional
See help(pyvista.plot) for additional keyword arguments.
"""
rnum = self.parse_step_substep(rnum) # need cumulative
if 'full_rotor' in kwargs:
raise NotImplementedError('``full_rotor`` keyword argument not supported')
# normalize nodal solution
_, complex_disp = self.nodal_solution(rnum, as_complex=True,
full_rotor=True)
complex_disp *= displacement_factor
complex_disp = complex_disp.reshape(-1, 3)
if comp == 'x':
axis = 0
elif comp == 'y':
axis = 1
elif comp == 'z':
axis = 2
else:
axis = None
result_info = ''
if add_text:
result_info = self.text_result_table(rnum)
# need only the surface of the full rotor
plot_mesh = self.full_rotor.extract_surface()
orig_pt = plot_mesh.points.copy()
# reduce the complex displacement to just the surface points
ind = plot_mesh.point_arrays['vtkOriginalPointIds']
complex_disp = np.take(complex_disp, ind, axis=0)
if axis is not None:
scalars = complex_disp[:, axis]
else:
scalars = (complex_disp*complex_disp).sum(1)**0.5
# initialize plotter
text_color = kwargs.pop('text_color', None)
cpos = kwargs.pop('cpos', None)
off_screen = kwargs.pop('off_screen', None)
plotter = pv.Plotter(off_screen=off_screen)
if kwargs.pop('show_axes', True):
plotter.add_axes()
if 'rng' not in kwargs:
smax = np.abs(scalars).max()
if comp == 'norm':
kwargs['rng'] = [0, smax]
else:
kwargs['rng'] = [-smax, smax]
background = kwargs.pop('background', None)
if background:
plotter.set_background(background)
plotter.add_mesh(plot_mesh,
scalars=np.real(scalars),
**kwargs)
# setup text
plotter.add_text(' ', font_size=20, position=[0, 0], color=text_color)
if cpos:
plotter.camera_position = cpos
if movie_filename:
if movie_filename.strip()[-3:] == 'gif':
plotter.open_gif(movie_filename)
else:
plotter.open_movie(movie_filename)
self._animating = True
def q_callback():
"""exit when user wants to leave"""
self._animating = False
plotter.add_key_event("q", q_callback)
# run until q is pressed
plotter.show(interactive=False, auto_close=False,
interactive_update=not off_screen)
first_loop = True
while self._animating:
for angle in np.linspace(0, np.pi*2, nangles):
padj = 1*np.cos(angle) - 1j*np.sin(angle)
complex_disp_adj = np.real(complex_disp*padj)
if axis is not None:
scalars = complex_disp_adj[:, axis]
else:
scalars = (complex_disp_adj*complex_disp_adj).sum(1)**0.5
plotter.update_scalars(scalars, render=False)
plot_mesh.points[:] = orig_pt + complex_disp_adj
if add_text:
plotter.textActor.SetInput('%s\nPhase %.1f Degrees' %
(result_info, (angle*180/np.pi)))
plotter.update(1, force_redraw=True)
if not self._animating:
break
if movie_filename and first_loop:
plotter.write_frame()
first_loop = False
if not loop:
break
cpos = plotter.camera_position
plotter.close()
return cpos
@wraps(animate_nodal_solution)
def animate_nodal_displacement(self, *args, **kwargs):
"""wraps animate_nodal_solution"""
return self.animate_nodal_solution(*args, **kwargs)
def _gen_full_rotor(self):
""" Create full rotor vtk unstructured grid """
grid = self._mas_grid.copy()
# transform to standard coordinate system
cs_cord = self._resultheader['csCord']
if cs_cord > 1:
matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
grid.transform(matrix)
# consider forcing low and high to be exact
# self._mas_grid.point_arrays['CYCLIC_M01H'] --> rotate and match
vtkappend = vtkAppendFilter()
# vtkappend.MergePointsOn()
# vtkappend.SetTolerance(1E-3) # not available until vtk 9+
rang = 360.0 / self.n_sector
for i in range(self.n_sector):
# Transform mesh
sector = grid.copy()
sector_id = np.empty(grid.n_points)
sector_id[:] = i
sector.point_arrays['sector_id'] = sector_id
sector.rotate_z(rang * i)
vtkappend.AddInputData(sector)
vtkappend.Update()
full_rotor = pv.wrap(vtkappend.GetOutput())
if cs_cord > 1:
matrix.Invert()
full_rotor.transform(matrix)
return full_rotor
@property
def full_rotor(self):
"""UnstructuredGrid of the full replicated rotor"""
if self._rotor_cache is None:
self._rotor_cache = self._gen_full_rotor()
return self._rotor_cache
def _plot_cyclic_point_scalars(self, scalars, rnum,
show_displacement=False,
displacement_factor=1,
overlay_wireframe=False,
add_text=True,
node_components=None,
element_components=None,
sel_type_all=True,
phase=None,
treat_nan_as_zero=True,
**kwargs):
"""Plot point scalars on active mesh.
Parameters
----------
scalars : numpy.ndarray
Node scalars to plot.
rnum : int, optional
Cumulative result number. Used for adding informative
text.
grid : pyvista.PolyData or pyvista.UnstructuredGrid, optional
Uses self.grid by default. When specified, uses this grid
instead.
show_displacement : bool, optional
Deforms mesh according to the result.
displacement_factor : float, optional
Increases or decreases displacement by a factor.
overlay_wireframe : bool, optional
Overlay a wireframe of the original undeformed mesh.
add_text : bool, optional
Adds information about the result when rnum is given.
treat_nan_as_zero : bool, optional
Treat NAN values (i.e. stresses at midside nodes) as zero
when plotting.
kwargs : keyword arguments
Additional keyword arguments. See ``help(pyvista.plot)``
Returns
-------
cpos : list
Camera position.
"""
# extract a sub-component of the grid if requested
grid = self._mas_grid
if node_components:
grid, ind = self._extract_node_components(node_components,
sel_type_all)
if scalars is not None:
scalars = scalars[:, ind]
elif element_components:
grid, ind = self._extract_element_components(element_components)
if scalars is not None:
scalars = scalars[:, ind]
if treat_nan_as_zero and scalars is not None:
scalars[np.isnan(scalars)] = 0
# must be removed before add_mesh **kwargs
window_size = kwargs.pop('window_size', None)
full_screen = kwargs.pop('full_screen', False)
screenshot = kwargs.pop('screenshot', None)
text_color = kwargs.pop('text_color', None)
kwargs.setdefault('cmap', 'jet')
if scalars is not None:
kwargs.setdefault('rng', [np.nanmin(scalars), np.nanmax(scalars)])
# Plot off screen when not interactive
off_screen = kwargs.pop('off_screen', None)
plotter = pv.Plotter(off_screen=off_screen)
# various plotting properties that must be removed before add_mesh
if kwargs.pop('show_axes', True):
plotter.add_axes()
plotter.background_color = kwargs.pop('background', None)
cpos = kwargs.pop('cpos', None)
cs_cord = self._resultheader['csCord']
if cs_cord > 1:
matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix = self.cs_4x4(cs_cord, as_vtk_matrix=True)
i_matrix.Invert()
else:
matrix = vtkMatrix4x4()
i_matrix = vtkMatrix4x4()
if overlay_wireframe:
rang = 360.0 / self.n_sector
for i in range(self.n_sector):
actor = plotter.add_mesh(grid.copy(False), color='w',
style='wireframe',
opacity=0.5, **kwargs)
# transform to standard position, rotate about Z axis,
# transform back
transform = vtkTransform()
transform.RotateZ(rang*i)
transform.Update()
rot_matrix = transform.GetMatrix()
if cs_cord > 1:
temp_matrix = vtkMatrix4x4()
rot_matrix.Multiply4x4(i_matrix, rot_matrix, temp_matrix)
rot_matrix.Multiply4x4(temp_matrix, matrix, rot_matrix)
transform.SetMatrix(rot_matrix)
actor.SetUserTransform(transform)
# add main mesh
if show_displacement:
_, disp = self.nodal_solution(rnum, phase, full_rotor=True)
disp *= displacement_factor
if node_components:
_, ind = self._extract_node_components(node_components, sel_type_all)
disp = disp[:, ind]
elif element_components:
_, ind = self._extract_element_components(element_components)
disp = disp[:, ind]
disp = disp.reshape(-1, 3)
rotor = self.full_rotor.copy()
rotor.points += disp
actor = plotter.add_mesh(rotor,
scalars=scalars.reshape(-1, 3),
**kwargs)
else:
surf_sector = grid.extract_surface()
ind = surf_sector.point_arrays['vtkOriginalPointIds']
rang = 360.0 / self.n_sector
for i in range(self.n_sector):
if scalars is not None:
sector_scalars = scalars[i, ind]
else:
sector_scalars = None
actor = plotter.add_mesh(surf_sector.copy(False),
scalars=sector_scalars,
**kwargs)
# NAN/missing data are white
plotter.mapper.GetLookupTable().SetNanColor(1, 1, 1, 1)
# transform to standard position, rotate about Z axis,
# transform back
transform = vtkTransform()
transform.RotateZ(rang*i)
transform.Update()
rot_matrix = transform.GetMatrix()
if cs_cord > 1:
temp_matrix = vtkMatrix4x4()
rot_matrix.Multiply4x4(i_matrix, rot_matrix, temp_matrix)
rot_matrix.Multiply4x4(temp_matrix, matrix, rot_matrix)
transform.SetMatrix(rot_matrix)
actor.SetUserTransform(transform)
# add table
if isinstance(add_text, str):
plotter.add_text(add_text, font_size=20, position=[0, 0],
color=text_color)
elif add_text:
rnum = self.parse_step_substep(rnum)
plotter.add_text(self.text_result_table(rnum), font_size=20,
color=text_color)
# must set camera position at the end
if cpos is not None:
plotter.camera_position = cpos
if screenshot:
cpos = plotter.show(auto_close=False,
window_size=window_size,
full_screen=full_screen)
plotter.screenshot(screenshot)
plotter.close()
else:
cpos = plotter.show(window_size=window_size, full_screen=full_screen)
return cpos
```
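A minimal usage sketch for the animation method above. It assumes the result file is opened with the ``ansys-mapdl-reader`` package and that ``cyclic_modal.rst`` is a hypothetical cyclic modal result file; the keyword values simply mirror the parameters documented in the docstring and are illustrative only.
```python
# Hypothetical example; the file name and parameter values are placeholders.
from ansys.mapdl import reader as pymapdl_reader

rst = pymapdl_reader.read_binary('cyclic_modal.rst')  # cyclic file -> cyclic result object
cpos = rst.animate_nodal_solution(
    0,                         # first mode (cumulative result number)
    comp='norm',               # animate the normalized displacement
    displacement_factor=0.02,  # scale the mode shape
    nangles=30,                # frames per full cycle
    show_phase=True,
    movie_filename='mode_0.mp4',
)
```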
#### File: mapdl/reader/rst_avail.py
```python
DESCRIPTION = {
'EMS': 'miscellaneous summable items(normally includes face pressures)',
'ENF': 'nodal forces',
'ENS': 'nodal stresses',
'ENG': 'element energies and volume',
'EGR': 'nodal gradients',
'EEL': 'nodal elastic strains',
'EPL': 'nodal plastic strains',
'ECR': 'nodal creep strains',
'ETH': 'nodal thermal strains (includes swelling strains)',
'EUL': 'element euler angles',
'EFX': 'nodal fluxes',
'ELF': 'nodal forces generated by the element, e.g. lorentz or maxwell forces',
'EMN': 'miscellaneous nonsummable items',
'ECD': 'nodal current densities',
'ENL': 'nodal nonlinear items, e.g. equivalent plastic strains',
'EHC': 'element joule heating',
'EPT': 'nodal temperatures',
'SURF': 'face surface stresses',
'EDI': 'nodal diffusion strains',
'ESTR': 'POST1 element (ETABLE) data',
'ECT': 'nodal contact items, e.g. penetration',
'EXYZ': 'integration point locations',
'EBAC': 'back stresses(for kinematic hardening)',
'ESVR': 'saved variables from usermat',
# 'EMR': 'element material saved record',
'NSL': 'Nodal displacements',
'VSL': 'Nodal velocities',
'ASL': 'Nodal accelerations',
'RF': 'Nodal reaction forces',
}
class AvailableResults():
def __init__(self, avail_bits, is_thermal):
"""Parse the available bits and determine if a given result is
available.
"""
self._avail_bits = avail_bits
self._parsed_bits = {
'EMS': self.ems,
'ENF': self.enf,
'ENS': self.ens,
'ENG': self.eng,
'EGR': self.egr,
'EEL': self.eel,
'EPL': self.epl,
'ECR': self.ecr,
'ETH': self.eth,
'EUL': self.eul,
'EFX': self.efx,
'ELF': self.elf,
'EMN': self.emn,
'ECD': self.ecd,
'ENL': self.enl,
'EHC': self.ehc,
'EPT': self.ept,
'SURF': self.surf,
'EDI': self.edi,
'ESTR': self.estr,
'ECT': self.ect,
'EXYZ': self.exyz,
'EBAC': self.ebac,
'ESVR': self.esvr,
# 'EMR': self.emr,
'NSL': self.nsl,
'VSL': self.vsl,
'ASL': self.asl,
'RF': self.rf,
}
self._valid_results = []
for key, value in self._parsed_bits.items():
if value:
self._valid_results.append(key)
self.description = DESCRIPTION
if is_thermal:
self.description['NSL'] = 'Nodal temperatures'
def __getitem__(self, key):
"""Allow a key access"""
return self._parsed_bits[key]
def __iter__(self):
for key in self._valid_results:
yield key
@property
def ems(self):
"""Miscellaneous summable items(normally includes face pressures)"""
return bool(self._avail_bits & 0b1 << 1)
@property
def enf(self):
"""Nodal forces"""
return bool(self._avail_bits & 0b1 << 2)
@property
def ens(self):
"""Nodal stresses"""
return bool(self._avail_bits & 0b1 << 3)
@property
def eng(self):
"""Element energies and volume"""
return bool(self._avail_bits & 0b1 << 4)
@property
def egr(self):
"""Nodal gradients"""
return bool(self._avail_bits & 0b1 << 5)
@property
def eel(self):
"""Nodal elastic strains"""
return bool(self._avail_bits & 0b1 << 6)
@property
def epl(self):
"""Nodal plastic strains"""
return bool(self._avail_bits & 0b1 << 7)
@property
def ecr(self):
"""Nodal creep strains"""
return bool(self._avail_bits & 0b1 << 8)
@property
def eth(self):
"""Nodal thermal strains (includes swelling strains)"""
return bool(self._avail_bits & 0b1 << 9)
@property
def eul(self):
"""Element euler angles"""
return bool(self._avail_bits & 0b1 << 10)
@property
def efx(self):
"""Nodal fluxes"""
return bool(self._avail_bits & 0b1 << 11)
@property
def elf(self):
"""Nodal forces generated by the element, e.g. lorentz or maxwell forces"""
return bool(self._avail_bits & 0b1 << 12)
@property
def emn(self):
"""Miscellaneous nonsummable items"""
return bool(self._avail_bits & 0b1 << 13)
@property
def ecd(self):
"""Nodal current densities"""
return bool(self._avail_bits & 0b1 << 14)
@property
def enl(self):
"""Nodal nonlinear items, e.g. equivalent plastic strains"""
return bool(self._avail_bits & 0b1 << 15)
@property
def ehc(self):
"""Element joule heating"""
return bool(self._avail_bits & 0b1 << 16)
@property
def ept(self):
"""Nodal temperatures"""
return bool(self._avail_bits & 0b1 << 17)
@property
def surf(self):
"""Face surface stresses"""
return bool(self._avail_bits & 0b1 << 18)
@property
def edi(self):
"""Nodal diffusion strains"""
return bool(self._avail_bits & 0b1 << 19)
@property
def estr(self):
"""Post1 element (ETABLE) data"""
return bool(self._avail_bits & 0b1 << 20)
@property
def ect(self):
"""Nodal contact items, e.g. penetration"""
return bool(self._avail_bits & 0b1 << 21)
@property
def exyz(self):
"""Integration point locations"""
return bool(self._avail_bits & 0b1 << 22)
@property
def ebac(self):
"""Back stresses(for kinematic hardening)"""
return bool(self._avail_bits & 0b1 << 23)
@property
def esvr(self):
"""Saved variables from usermat"""
return bool(self._avail_bits & 0b1 << 24)
# @property
# def emr(self):
# """Element material saved record"""
# return bool(self._avail_bits & 0b1 << 25)
@property
def nsl(self):
"""Nodal displacements"""
return bool(self._avail_bits & 0b1 << 27)
@property
def vsl(self):
"""Nodal velocities"""
return bool(self._avail_bits & 0b1 << 28)
@property
def asl(self):
"""Nodal accelerations"""
return bool(self._avail_bits & 0b1 << 29)
@property
def rf(self):
"""Nodal reaction forces"""
return bool(self._avail_bits & 0b1 << 30)
def __repr__(self):
txt = "Available Results:\n"
if self.ems:
txt += "EMS : Miscellaneous summable items (normally includes face pressures)\n"
if self.enf:
txt += "ENF : Nodal forces\n"
if self.ens:
txt += "ENS : Nodal stresses\n"
if self.eng:
txt += "ENG : Element energies and volume\n"
if self.egr:
txt += "EGR : Nodal gradients\n"
if self.eel:
txt += "EEL : Nodal elastic strains\n"
if self.epl:
txt += "EPL : Nodal plastic strains\n"
if self.ecr:
txt += "ECR : Nodal creep strains\n"
if self.eth:
txt += "ETH : Nodal thermal strains (includes swelling strains)\n"
if self.eul:
txt += "EUL : Element euler angles\n"
if self.efx:
txt += "EFX : Nodal fluxes\n"
if self.elf:
txt += "ELF : Nodal forces generated by the element, e.g. lorentz or maxwell forces\n"
if self.emn:
txt += "EMN : Miscellaneous nonsummable items\n"
if self.ecd:
txt += "ECD : Nodal current densities\n"
if self.enl:
txt += "ENL : Nodal nonlinear items, e.g. equivalent plastic strains\n"
if self.ehc:
txt += "EHC : Element joule heating\n"
if self.ept:
txt += "EPT : Nodal temperatures\n"
if self.surf:
txt += "SURF: Face surface stresses\n"
if self.edi:
txt += "EDI : Nodal diffusion strains\n"
if self.estr:
txt += "ESTR: Post1 element (ETABLE) data\n"
if self.ect:
txt += "ECT : Nodal contact items, e.g. penetration\n"
if self.exyz:
txt += "EXYZ: Integration point locations\n"
if self.ebac:
txt += "EBAC: Back stresses (for kinematic hardening)\n"
if self.esvr:
txt += "ESVR: Saved variables from usermat\n"
# if self.emr:
# txt += "EMR : Element material saved record\n"
if self.nsl:
txt += "NSL : Nodal displacements\n"
if self.vsl:
txt += "VSL : Nodal velocities\n"
if self.asl:
txt += "ASL : Nodal accelerations\n"
if self.rf:
txt += "RF : Nodal reaction forces\n"
return txt
``` |
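As a quick illustration of the bit layout encoded by the properties above, the class can be exercised with a synthetic bit mask; the mask value below is made up purely for demonstration.
```python
# Pretend the result file reports nodal displacements, nodal stresses and
# elastic strains (bits 27, 3 and 6 of the availability mask).
avail_bits = (1 << 27) | (1 << 3) | (1 << 6)
avail = AvailableResults(avail_bits, is_thermal=False)

print(avail.nsl, avail.ens, avail.eel)  # True True True
print(avail.rf)                         # False
print(list(avail))                      # ['ENS', 'EEL', 'NSL']
print(avail.description['NSL'])         # 'Nodal displacements'
```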
{
"source": "1990huiyuan/EDA-on-Yelp-Restaurant-data",
"score": 4
} |
#### File: 1990huiyuan/EDA-on-Yelp-Restaurant-data/Yelp_restaurant.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from mpl_toolkits.basemap import Basemap
import seaborn as sns
from tkinter import *
import imageio
import folium
import folium.plugins as plugins
import pymysql.cursors
from math import sin, cos, sqrt, atan2, radians
import pymysql as pymysql
# # Section 1. General exploratory analysis: understanding the data
# In[2]:
##import SQL data into pandas
db = pymysql.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="<PASSWORD>", # your password
db="yelpdb") # name of the data base
# In[46]:
## Top 10 cities have have the most data
most_data_cities = pd.read_sql(sql = 'select city, count(*) as Restaurants_Number from yelp1 group by city order by count(*) desc limit 10', con = db)
x_city=most_data_cities['city']
y_city=most_data_cities['Restaurants_Number']
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x_city, y_city, alpha=0.8)
plt.title("Top 10 cities that have the most data",fontsize=12)
locs, labels = plt.xticks()
#plt.setp(labels, rotation=20)
plt.ylabel('Data Number', fontsize=12)
plt.xlabel('City', fontsize=12)
#adding the text labels
rects = ax.patches
labels = y_city
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[41]:
## Top restaurants that have the most number of reviews
most_data_cities = pd.read_sql(sql = 'select name, review_count from yelp1 order by review_count desc limit 10', con = db)
x_rest=most_data_cities['review_count']
y_rest=most_data_cities['name']
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x_rest, y_rest, alpha=0.8)
plt.title("Top 10 restaurants that have the most number of reviews",fontsize=12)
locs, labels = plt.xticks()
#plt.setp(labels, rotation=20)
plt.ylabel('Restaurants', fontsize=12)
plt.xlabel('Number of Reviews', fontsize=12)
# In[56]:
##Find the stars components for the restaurant that has the most reviews
hot_cities = pd.read_sql(sql = 'select date, stars from yelp2 where business_id= (select new.business_id from (select name, business_id, review_count from yelp1 where review_count=(select max(review_count) from yelp1)) new)', con = db)
count_hotcity=hot_cities.groupby('stars').count()
star=list(count_hotcity.index.values)
count=list(count_hotcity['date'])
labels = ['Star' + str(s) for s in star]
explode = (0.1, 0, 0,0,0) # only "explode" the 1st slice
fig1, ax1 = plt.subplots(figsize=(8,8))
ax1.pie(count, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Pie plot for review stars for the hottest restaurants")
plt.show()
# In[77]:
##plot the total number of reviews from 2005 to 2018 for the hottest restaurants
hot_cities['date']=pd.to_datetime(hot_cities['date'].values)
hot_cities.set_index('date').resample('A').count().plot()
plt.ylabel('Number of all reviews',fontsize=12)
plt.xlabel('Year',fontsize=12)
plt.title("Time series of review number for the hottest restaurants")
# In[80]:
## Top 10 reviewers that give the most number of useful, funny or cool reviews
most_active_user = pd.read_sql(sql = 'select user_id, count(*) as Number from yelp2 group by user_id order by count(*) desc limit 10', con = db)
x_user=most_active_user['Number']
y_user=most_active_user['user_id']
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x_user, y_user, alpha=0.8)
plt.title("Top 10 users that give the most reviews",fontsize=12)
locs, labels = plt.xticks()
#plt.setp(labels, rotation=20)
plt.ylabel('User ID', fontsize=12)
plt.xlabel('Number of reviews', fontsize=12)
# In[83]:
##Find the stars components for the user that gives the most reviews
active_user = pd.read_sql(sql = 'select date, stars, useful, cool, funny from yelp2 where user_id=(select new.user_id from (select user_id, count(*) as number from yelp2 group by user_id order by count(*) desc limit 1) new)', con = db)
groupby_hotuser=active_user.groupby('stars').count()
star=list(groupby_hotuser.index.values)
count=list(groupby_hotuser['date'])
labels = ['Star' + str(s) for s in star]
explode = (0.1, 0, 0,0,0) # only "explode" the 1st slice
fig1, ax1 = plt.subplots(figsize=(8,8))
ax1.pie(count, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Pie plot for review stars for the most active user")
plt.show()
# In[86]:
##Find the percent of useful comments from the user that gives the most reviews
groupby_useful=active_user.groupby('useful').count()
useful=list(groupby_useful.index.values)
count=list(groupby_useful['date'])
labels = ['Score' + str(s) for s in useful]
explode = (0.1, 0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) # only "explode" the 1st slice
fig1, ax1 = plt.subplots(figsize=(8,8))
ax1.pie(count, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Pie plot for useful comments from the most active user")
plt.show()
# In[90]:
##plot the total number of reviews from 2005 to 2018 given by the most active user
active_user['date']=pd.to_datetime(active_user['date'].values)
active_user.set_index('date').resample('A').count()['stars'].plot()
plt.ylabel('Number of all reviews',fontsize=12)
plt.xlabel('Year',fontsize=12)
plt.title("Time series of review number given by the most active user")
# In[152]:
##Get the opening hours for restaurants on Sunday
from datetime import datetime
Sunday_hour = pd.read_sql(sql = 'select Sunday, count(*) from yelp_h group by Sunday', con = db)
open_hour=[]
for i in range(len(Sunday_hour)):
a=list(Sunday_hour['Sunday'].values)[i].split('-')
#set the date and time format
date_format = "%H:%M"
#convert string to actual date and time
if (a!=['']):
time1 = datetime.strptime(a[0], date_format)
time2 = datetime.strptime(a[1], date_format)
open_hour.append(str(time2-time1))
else:
open_hour.append('NaN')
new_open_hour = [x.replace('-1 day, ', '') for x in open_hour]
Sunday_hour['open_hour']=new_open_hour
open_data=Sunday_hour.groupby('open_hour').sum()
y_hour=list(open_data['count(*)'].values)[:50]
x_hour=list(open_data.index.values)[:50]
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x_hour, y_hour, alpha=0.8)
plt.title("Open hours for all restaurants on Sunday",fontsize=12)
locs, labels = plt.xticks()
plt.setp(labels, rotation=40)
plt.ylabel('Number of restaurants', fontsize=12)
plt.xlabel('Open Hours', fontsize=12)
# # Section 2. Plot the restaurants on maps
# In[3]:
restaurants = pd.read_sql(sql = 'SELECT business_id, name, categories, latitude, longitude , city, state, postal_code, stars, review_count, NoiseLevel, RestaurantsAttire, RestaurantsPriceRange2, Alcohol FROM YELP1', con = db)
review = pd.read_sql(sql = 'SELECT business_id, review_id, date, stars FROM YELP2', con = db)
# In[4]:
restaurants.head()
restaurants.shape ##There are 57173 restaurants in total in the database; this table has 32 fields
# In[5]:
restaurants_attributes=list(restaurants)
print(restaurants_attributes)### Restaurant attributes
# In[6]:
review.head()
review.shape ##There are 3654797 reviews in total in the database; this table has 8 fields
# In[7]:
###Show the geographical distribution all restaurants in United States in the map
fig = plt.figure(figsize=(8, 8))
m = Basemap(projection='lcc', resolution=None,
width=8E6, height=8E6,
lat_0=42, lon_0=-100)
m.etopo(scale=0.5, alpha=0.5)
x=restaurants["longitude"].values
y=restaurants["latitude"].values
x, y = m(x, y)
plt.plot(x, y, 'ok', markersize=5)
##Number of total restuarants shown in the map
print("Total Number of Restaurants:", x.size)
# In[8]:
## Give a closer look at the regional distribution based on user-specified cities.
cities=set(restaurants["city"].values)
## Users are able to choose a city among this list
cities
# In[3]:
#### Provide the user with a GUI where they can input the city of interest
##This function is adapted from https://www.python-course.eu/tkinter_entry_widgets.php
def show_entry_fields():
print("The user is interested in the restaurants in %s in %s state" % (e1.get(), e2.get()))
master = Tk()
Label(master, text="City").grid(row=0)
Label(master, text="State").grid(row=1)
e1 = Entry(master)
e2 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
Button(master, text='Quit', command=master.quit).grid(row=3, column=0, sticky=W, pady=4)
Button(master, text='Show', command=show_entry_fields).grid(row=3, column=1, sticky=W, pady=4)
mainloop( )
# In[10]:
interested_city=e1.get()
interested_state=e2.get()
print(interested_city)
print(interested_state)
# In[11]:
###Plot Scatter for the restaurants in the selected city
selected_restaurants = restaurants.loc[(restaurants['city'] == interested_city) & (restaurants['state'] == interested_state)]
fig = plt.figure(figsize=(5,5))
#m = Basemap(projection='merc',llcrnrlat=selected_restaurants['latitude'].nsmallest(2).iloc[-1],urcrnrlat=selected_restaurants['latitude'].max(),\
# llcrnrlon=selected_restaurants['longitude'].min(),urcrnrlon=selected_restaurants['longitude'].max(),lat_ts=20,resolution='c')
x = selected_restaurants['longitude'].values
y = selected_restaurants['latitude'].values
colors = selected_restaurants['RestaurantsPriceRange2'].values
sizes = selected_restaurants['stars'].values*5
#x, y = m(x, y)
plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
cmap='jet')
plt.ylim(selected_restaurants['latitude'].nsmallest(2).iloc[-1], selected_restaurants['latitude'].max())
cbar=plt.colorbar() # show color scale
cbar.set_label("Price Range")
# make legend with dummy points
for a in [1, 3, 5]:
plt.scatter([], [], c='k', alpha=0.5, s=a*5,
label='review star ' + str(a))
plt.legend(scatterpoints=1, frameon=False,
labelspacing=1, loc='upper left')
plt.title("Restaurants distribution in %s in %s state" % (interested_city, interested_state))
plt.xlabel("Longitude")
plt.ylabel("Latitude")
# Section 2: Map animation show
# In[12]:
##Generate an animation that shows the locations of restaurants with different review stars
data=[]
stars_list=sorted(list(selected_restaurants['stars'].unique()))
for star in stars_list:
subset=selected_restaurants[selected_restaurants['stars']==star]
data.append(subset[['latitude','longitude']].values.tolist())
lat = selected_restaurants['latitude'].mean()
lon = selected_restaurants['longitude'].mean()
zoom_start=10
print("Animation for restaurants with different starts in %s in %s state" % (interested_city, interested_state))
# basic map
m = folium.Map(location=[lat, lon], tiles="OpenStreetMap", zoom_start=zoom_start)
#improvising the HeatMapWithTime plugin to show variations across star ratings
hm = plugins.HeatMapWithTime(data,index=stars_list,max_opacity=0.3,auto_play=True,display_index=True,radius=10)
hm.add_to(m)
m
# # Section 3. Exploratory Data Analysis--Plots of restaurant attributes
# In[13]:
##Distribution of price range in the interested city
price_range=selected_restaurants.groupby('RestaurantsPriceRange2').count()
range_set=list(price_range.index.values)
number_set=list(price_range['business_id'])
labels = ['Price Range' + str(s) for s in range_set]
explode = (0.1, 0, 0,0,0) # only "explode" the 1st slice
fig1, ax1 = plt.subplots(figsize=(8,8))
ax1.pie(number_set, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Pie plot for restaurants with price ranges")
plt.show()
# In[14]:
### Plot the number of restaurants for different categories
Category=['American','Chinese','Italian','French','Indian','Mexican','Afghan','Japanese','Wine Bars','Sports Bars']
cat=[]
for i in selected_restaurants['categories'].values:
cator=[j for j in Category if j in i]
if cator != []:
cator=cator[0]
else:
cator='Others'
cat.append(cator)
selected_restaurants['Simple_Category']=cat
category_number=selected_restaurants.groupby('Simple_Category').count()
plot_x=category_number.index.values
plot_y=category_number['business_id'].values
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(plot_x, plot_y, alpha=0.8)
plt.title("Number of restaurants for different categories",fontsize=12)
locs, labels = plt.xticks()
#plt.setp(labels, rotation=20)
plt.ylabel('Number', fontsize=12)
plt.xlabel('Restaurant Category', fontsize=12)
#adding the text labels
rects = ax.patches
labels = plot_y
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[16]:
##For each category of restaurant, suggest the 'hottest' restaurant that received the most reviews
hot=selected_restaurants[['name','review_count','Simple_Category']].groupby('Simple_Category').max()
hot
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(hot.index.values, hot['review_count'], alpha=0.8)
plt.title("The hottest restaurant from each category",fontsize=12)
locs, labels = plt.xticks()
#plt.setp(labels, rotation=20)
plt.ylabel('Review Number', fontsize=12)
plt.xlabel('Restaurant Category', fontsize=12)
#adding the text labels
rects = ax.patches
labels = hot['name']
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[15]:
##find the top 10 restaurants that get the most 'five star' review in the interested city
interested_busID=selected_restaurants['business_id'].values
interested_review=review[review['business_id'].isin(interested_busID)]
five_star_review=interested_review[interested_review['stars']==5]
sorted_list=five_star_review.groupby('business_id').count().sort_values(by=['review_id'], ascending=False)
top_ten=list(sorted_list[:10].index.values)
top_restaurant=selected_restaurants[selected_restaurants['business_id'].isin(top_ten)]
names=top_restaurant['name'].values
counts=list(sorted_list[:10]['review_id'].values)
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(names, counts, alpha=0.8)
plt.title("Top ten restaurants based on number of 'five-star' reviews",fontsize=12)
locs, labels = plt.xticks()
plt.setp(labels, rotation=20)
plt.ylabel('Number of five-star reviews', fontsize=12)
plt.xlabel('Restaurant Name', fontsize=12)
#adding the text labels
rects = ax.patches
labels = counts
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[16]:
##For these top ten restaurants, the number of reviews change with year
merged=pd.merge(interested_review,selected_restaurants[['business_id','name']])
top_ten_annual_review=merged[merged['business_id'].isin(top_ten)]
top_ten_annual_review = top_ten_annual_review.set_index(pd.DatetimeIndex(top_ten_annual_review['date']))
figure=plt.figure(figsize=(10,10))
a=top_ten_annual_review.groupby('name').resample('A').count()['business_id']
for i in names:
a[i].plot(label=i)
plt.legend()
plt.ylabel('Number of all reviews',fontsize=12)
plt.xlabel('Year',fontsize=12)
plt.title("Time series of review number for the top ten restaurants")
# Section 3. Exploratory Data Analysis--Explore relationships between variables
# In[17]:
###Explore price range versus cloth required
#sns.pairplot(selected_restaurants[['RestaurantsAttire','RestaurantsPriceRange2']])
ax = sns.boxplot(x="RestaurantsAttire", y="RestaurantsPriceRange2", data=selected_restaurants, whis=np.inf)
ax = sns.swarmplot(x="RestaurantsAttire", y="RestaurantsPriceRange2", data=selected_restaurants, color=".2")
# In[20]:
###Explore number of reviews versus star rates
sns.swarmplot(x="stars", y="review_count", data=selected_restaurants).set_title("Relationship of stars and number of reviews")
# In[18]:
## Explore number of five-star review versus restaurant type
merged_type=pd.merge(selected_restaurants[['business_id','Simple_Category']],interested_review)
total=merged_type.groupby('Simple_Category').count()['business_id']
type_all=list(total.index.values)
five_review_total=np.array(total.values)
merged_type_five=merged_type[merged_type['stars']==5]
b=merged_type_five.groupby('Simple_Category').count()['business_id']
type_cat=list(b.index.values)
five_review_NO=np.array(b.values)
percent=five_review_NO/five_review_total
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(type_cat, percent, alpha=0.8)
plt.title("Satisfactory for different restaurant category",fontsize=12)
locs, labels = plt.xticks()
plt.setp(labels, rotation=20)
plt.ylabel('percent of five-star reviews(%)', fontsize=12)
plt.xlabel('Restaurant Category', fontsize=12)
plt.show()
# # Section 4. A filter to restaurants
# Based on categories the user can select from (e.g. price, star rating, restaurant type, number of reviews, distance, whether take-out is available, whether credit cards are accepted), we return a list of qualified restaurants and plot them on the map with a scatterplot.
#
# In[4]:
def show_entry_fields():
print(""""The user is interested in the restaurants in %s in %s state that in category of %s, postal area is
%s, %s star, number of reviews more than %s, price range is %s""" % (interested_city,interested_state,e1.get(),
e2.get(),e3.get(),e4.get(),e5.get()))
master = Tk()
Label(master, text="Category").grid(row=0)
Label(master, text="Zip code").grid(row=1)
Label(master, text="Star").grid(row=2)
Label(master, text="Review Number").grid(row=3)
Label(master, text="Price Range").grid(row=4)
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e5 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=3, column=1)
e5.grid(row=4, column=1)
Button(master, text='Quit', command=master.quit).grid(row=6, column=0, sticky=W, pady=4)
Button(master, text='Show', command=show_entry_fields).grid(row=6, column=1, sticky=W, pady=4)
mainloop( )
# In[20]:
filtered_cat=e1.get()
filtered_post=e2.get()
filtered_star=e3.get()
filtered_review=e4.get()
filtered_price=e5.get()
# In[21]:
filtered_restaurants = selected_restaurants.loc[(selected_restaurants['Simple_Category']==filtered_cat)
& (selected_restaurants['postal_code'] == filtered_post)
& (selected_restaurants['stars'] == int(filtered_star))
& (selected_restaurants['review_count'] >= int(filtered_review))
& (selected_restaurants['RestaurantsPriceRange2'] == int(filtered_price))]
show_table=filtered_restaurants[['name','postal_code']]
show_table
# In[22]:
##Show the filtered restaurants on map
filterd_lat=filtered_restaurants['latitude'].mean()
filterd_lon=filtered_restaurants['longitude'].mean()
filterd_map = folium.Map(location=[filterd_lat, filterd_lon],
tiles = "OpenStreetMap",
zoom_start = 14)
# 'width=int' and 'height=int' can also be added to the map
lat_x=list(filtered_restaurants['latitude'].values)
lon_y=list(filtered_restaurants['longitude'].values)
filtered_name=list(filtered_restaurants['name'].values)
for i in range(len(filtered_name)):
folium.Marker([lat_x[i],lon_y[i]], popup=filtered_name[i].replace("'","")).add_to(filterd_map)
####may be add open hours as well
filterd_map
# # Section 5. Restaurants recommender
# The user enters a restaurant name; we look up the properties of that restaurant and return other restaurants with the same properties (including price, star rating, restaurant type, number of reviews, distance, whether take-out is available, whether credit cards are accepted, etc.). Those restaurants are then plotted on the map.
#
# In[ ]:
### Before we recommend you restaurants, please tell us where you are now
def show_entry_fields():
print(""""You are in altitude of %s, longtitude of %s.\n Your favourite retaurant is %s.\n You want restaurants within %s miles.""" % (e1.get(),
e2.get(),e3.get(),e4.get()))
master = Tk()
Label(master, text="Current Latitude").grid(row=0)
Label(master, text="Current longtitude").grid(row=1)
Label(master, text="Favourite Retaurant").grid(row=2)
Label(master, text="Miles").grid(row=3)
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=3, column=1)
Button(master, text='Quit', command=master.quit).grid(row=5, column=0, sticky=W, pady=4)
Button(master, text='Show', command=show_entry_fields).grid(row=5, column=1, sticky=W, pady=4)
mainloop( )
# In[24]:
current_lat=float(e1.get())
current_lon=float(e2.get())
favourite_rest=e3.get()
miles=float(e4.get())
# In[25]:
##Get the star, price range,noise level, alcohol and Category of the favourite restaurant
favourite_star=int(selected_restaurants[selected_restaurants['name']==favourite_rest]['stars'].values)
favourite_price=int(selected_restaurants[selected_restaurants['name']==favourite_rest]['RestaurantsPriceRange2'].values)
favourite_cat=selected_restaurants[selected_restaurants['name']==favourite_rest]['Simple_Category'].values[0]
favourite_noise=selected_restaurants[selected_restaurants['name']==favourite_rest]['NoiseLevel'].values[0]
favourite_alcohol=selected_restaurants[selected_restaurants['name']==favourite_rest]['Alcohol'].values[0]
# In[26]:
##get the recommend list
recommend_restaurants = selected_restaurants.loc[(selected_restaurants['Simple_Category']==favourite_cat)
& (selected_restaurants['stars'] == favourite_star)
& (selected_restaurants['RestaurantsPriceRange2'] == favourite_price)
& (selected_restaurants['NoiseLevel'] == favourite_noise)
& (selected_restaurants['Alcohol'] == favourite_alcohol)]
recommend_restaurants
# In[27]:
## Calculate two points distance given their latitude and longitude
# approximate radius of earth in km
def cal_distance(current_lat,current_lon,new_lat,new_lon):
R = 6373.0
K_M=0.62
lat1 = radians(current_lat)
lon1 = radians(current_lon)
lat2 = radians(new_lat)
lon2 = radians(new_lon)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c * K_M
return distance
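# Hypothetical sanity check: two points a few kilometres apart in Las Vegas
# should come out to roughly 4 miles, e.g.
# cal_distance(36.1699, -115.1398, 36.1147, -115.1728)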
# In[28]:
##filter out the qualified restaurants that are too far away from the current location
recommend_x=list(recommend_restaurants['latitude'].values)
recommend_y=list(recommend_restaurants['longitude'].values)
recommend_name=list(recommend_restaurants['name'].values)
name_list=[]
for i in range(len(recommend_x)):
if cal_distance(current_lat,current_lon,recommend_x[i],recommend_y[i]) <= miles:
name_list.append(recommend_name[i])
name_list
# In[29]:
recommend_final = recommend_restaurants.loc[(recommend_restaurants['name'].isin(name_list))
& (recommend_restaurants['name'] != favourite_rest)]
recommend_final
# In[30]:
##Show recommended restaurants on map based on the users preferences
rcd_lat=recommend_final['latitude'].mean()
rcd_lon=recommend_final['longitude'].mean()
rcd_map = folium.Map(location=[rcd_lat, rcd_lon],
tiles = "OpenStreetMap",
zoom_start = 14)
rcd_x=list(recommend_final['latitude'].values)
rcd_y=list(recommend_final['longitude'].values)
rcd_name=list(recommend_final['name'].values)
for i in range(len(rcd_name)):
folium.Marker([rcd_x[i],rcd_y[i]], popup=rcd_name[i].replace("'","")).add_to(rcd_map)
folium.Marker([current_lat,current_lon], popup="You current location",icon=folium.Icon(color='red')).add_to(rcd_map)
rcd_map
``` |
{
"source": "1992huanghai/DeepCTR",
"score": 3
} |
#### File: estimator/models/xdeepfm.py
```python
import tensorflow as tf
from ..feature_column import get_linear_logit, input_from_feature_columns
from ..utils import deepctr_model_fn, DNN_SCOPE_NAME, variable_scope
from ...layers.core import DNN
from ...layers.interaction import CIN
from ...layers.utils import concat_func, add_func, combined_dnn_input
def xDeepFMEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256),
cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001,
l2_reg_embedding=0.00001, l2_reg_dnn=0, l2_reg_cin=0, seed=1024, dnn_dropout=0,
dnn_activation='relu', dnn_use_bn=False, task='binary', model_dir=None, config=None,
linear_optimizer='Ftrl',
dnn_optimizer='Adagrad', training_chief_hooks=None):
"""Instantiates the xDeepFM architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network
:param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit
:param cin_activation: activation function used on feature maps
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: L2 regularizer strength applied to deep net
:param l2_reg_cin: L2 regularizer strength applied to CIN.
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:param model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
:param config: tf.RunConfig object to configure the runtime settings.
:param linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. Defaults to FTRL optimizer.
:param dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. Defaults to Adagrad optimizer.
:param training_chief_hooks: Iterable of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
:return: A Tensorflow Estimator instance.
"""
def _model_fn(features, labels, mode, config):
train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
linear_logits = get_linear_logit(features, linear_feature_columns, l2_reg_linear=l2_reg_linear)
logits_list = [linear_logits]
with variable_scope(DNN_SCOPE_NAME):
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding=l2_reg_embedding)
fm_input = concat_func(sparse_embedding_list, axis=1)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
dnn_logit = tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)
logits_list.append(dnn_logit)
if len(cin_layer_size) > 0:
exFM_out = CIN(cin_layer_size, cin_activation,
cin_split_half, l2_reg_cin, seed)(fm_input, training=train_flag)
exFM_logit = tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.glorot_normal(seed) )(exFM_out)
logits_list.append(exFM_logit)
logits = add_func(logits_list)
return deepctr_model_fn(features, mode, logits, labels, task, linear_optimizer, dnn_optimizer,
training_chief_hooks=training_chief_hooks)
return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
``` |
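A minimal, hypothetical training sketch for the estimator above. The feature names, hash-bucket size, and the toy in-memory dataset are invented for illustration, and the exact import path may vary between DeepCTR versions; a real pipeline would normally build the ``input_fn`` from CSV or TFRecord files.
```python
import tensorflow as tf
from deepctr.estimator import xDeepFMEstimator  # import path may differ by version

# Hypothetical features: one dense column and one hashed categorical column.
cat_col = tf.feature_column.categorical_column_with_hash_bucket('device', 100)
linear_cols = [tf.feature_column.numeric_column('price'), cat_col]
dnn_cols = [tf.feature_column.numeric_column('price'),
            tf.feature_column.embedding_column(cat_col, dimension=8)]

def input_fn():
    features = {'price': [0.1, 0.5, 0.9], 'device': ['ios', 'android', 'ios']}
    labels = [0.0, 1.0, 0.0]
    return tf.data.Dataset.from_tensor_slices((features, labels)).batch(3)

model = xDeepFMEstimator(linear_cols, dnn_cols, task='binary')
model.train(input_fn)
```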
{
"source": "1995chen/jingdong_financial",
"score": 2
} |
#### File: jingdong_financial/app/dependencies.py
```python
import json
import sys
import os
from typing import Dict, Any, List
from dataclasses import dataclass, Field, field
import template_logging
from dacite import from_dict
from dacite.dataclasses import get_fields
from inject import autoparams
from redis import StrictRedis
from celery import Celery
from sqlalchemy.orm import scoped_session, Session
from template_cache import Cache
from template_babel.babel import TemplateBabel
from template_rbac import OAuth2SSO, Auth
from template_pagination import Pagination
from template_migration import Migration
from template_json_encoder import TemplateJSONEncoder
from template_apollo import ApolloClient
from work_wechat import WorkWeChat
from app.constants.enum import SysCMD
logger = template_logging.getLogger(__name__)
@dataclass
class Config:
PORT: int = os.getenv('PORT', 8080)
CMD: SysCMD = field(default_factory=lambda: SysCMD[sys.argv[1].replace('-', '_').upper()])
# 服务内部调用UUID
INNER_CALL_UID: str = '6401e2c6-9b85-11ec-b3c0-1e00621ab048'
# 项目名称
PROJECT_NAME: str = 'jingdong_financial'
# 项目路径
PROJECT_PATH: str = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
FRONTEND_PATH: str = os.path.join(PROJECT_PATH, 'static', 'release')
API_PREFIX: str = '/api'
# 中间件配置
REDIS_URL: str = None
SQLALCHEMY_DATABASE_URI: str = None
CELERY_BROKER: str = None
CELERY_BACKEND: str = None
REDBEAT_REDIS_URL: str = None
# celery beat 锁超时时间
REDBEAT_LOCK_TIMEOUT: int = 600
# 定时任务并发进程数
BEAT_WORKER_NUM: int = 2
# worker并发进程数
WORKER_NUM: int = 2
# 每个worker进程最大执行任务数
MAX_TASKS_PER_CHILD: int = 20
# OAuth2 SSO配置
OAUTH2_CLIENT_ID: str = ''
OAUTH2_CLIENT_SECRET: str = ''
OAUTH2_AUTHORIZATION_ENDPOINT: str = ''
OAUTH2_TOKEN_ENDPOINT: str = ''
OAUTH2_USERINFO_ENDPOINT: str = ''
# 应用后端地址
APP_ROOT_URI: str = 'http://127.0.0.1:8000/'
# 应用前端地址
APP_FRONTEND_URI: str = 'http://127.0.0.1:3000/'
APP_AUTH_PATH: str = '/api/login'
APP_LOGOUT_PATH: str = '/api/logout'
# jwt token 超时时间
JWT_EXPIRE_TIME: int = 86400
# jwt token secret key
JWT_SECRET_KEY: str = ''
RUNTIME_ENV: str = os.getenv('RUNTIME_ENV', 'DEV')
# 是否是测试环境
IS_DEV: bool = False if RUNTIME_ENV == 'PROD' else True
# 阿波罗配置
APOLLO_APP_ID: str = 'jingdong_financial'
APOLLO_CONFIG_SERVER_URL: str = 'http://apollo.local.domain:13043'
# 企业微信 CORP_ID
CORP_ID: str = ''
# 企业微信 CORP_SECRET
CORP_SECRET: str = ''
# 企业微信应用 AGENT_ID
AGENT_ID: int = ''
# 京东金融API配置
JD_FINANCE_API_URL: str = "https://ms.jr.jd.com/gw/generic/hj/h5/m/latestPrice"
# header信息
JD_FINANCE_API_HEADERS: str = json.dumps(
{
"referer": "https://m.jdjygold.com/finance-gold/msjgold/homepage?orderSource=7",
"host": "ms.jr.jd.com"
}
)
# Request params
JD_FINANCE_API_PARAMS: str = json.dumps({})
# Sample size [used to compute the rise/fall amplitude]
SAMPLE_COUNT: int = 20
# Notify when the rise exceeds this value [amount]
TARGET_RISE_PRICE: float = 2.0
# Notify when the fall exceeds this value [amount]
TARGET_FALL_PRICE: float = 2.0
# Notify when the gold price rises above this value [absolute price]
RISE_TO_TARGET_PRICE: float = 400.0
# Notify when the gold price falls below this value [absolute price]
FALL_TO_TARGET_PRICE: float = 365.0
# Time window [seconds] within which repeated notifications of the same type are limited
DUPLICATE_NOTIFY_TIME_LIMIT: int = 90
# Maximum number of repeated pushes for the same notification type within that window
DUPLICATE_NOTIFY_TIMES: int = 3
class MainDBSession(scoped_session, Session):
pass
class CacheRedis(StrictRedis):
pass
@autoparams()
def init_oauth2_sso(config: Config) -> OAuth2SSO:
# 初始化
oauth2_sso_instance: OAuth2SSO = OAuth2SSO(
# OAuth2服务配置
client_id=config.OAUTH2_CLIENT_ID,
client_secret=config.OAUTH2_CLIENT_SECRET,
authorization_endpoint=config.OAUTH2_AUTHORIZATION_ENDPOINT,
token_endpoint=config.OAUTH2_TOKEN_ENDPOINT,
userinfo_endpoint=config.OAUTH2_USERINFO_ENDPOINT,
# 后端服务地址
app_root_url=config.APP_ROOT_URI,
# 认证成功后跳转该地址并带上token
after_login_redirect_url=config.APP_FRONTEND_URI,
# 后端服务认证地址
api_auth_path=config.APP_AUTH_PATH,
# 后端服务登出地址
api_logout_path=config.APP_LOGOUT_PATH,
jwt_secret=config.JWT_SECRET_KEY,
token_timeout=config.JWT_EXPIRE_TIME,
debug_mode=config.IS_DEV
)
# sso绑定handler
from app.handlers.oauth2_sso import (
before_redirect_handler, oauth2_sso_generate_token_info_handler, oauth2_sso_logout_handler
)
oauth2_sso_instance.set_generate_token_info_handler(oauth2_sso_generate_token_info_handler)
oauth2_sso_instance.set_before_redirect_handler(before_redirect_handler)
oauth2_sso_instance.set_logout_handler(oauth2_sso_logout_handler)
return oauth2_sso_instance
@autoparams()
def init_auth(config: Config) -> Auth:
# 初始化
auth: Auth = Auth(config.JWT_SECRET_KEY, not config.IS_DEV)
# 绑定handler
from app.handlers.auth import get_user_roles_handler, get_user_info_handler, user_define_validator_handler
auth.set_get_user_info_handler(get_user_info_handler)
auth.set_get_user_roles_handler(get_user_roles_handler)
auth.set_user_define_validator_handler(user_define_validator_handler)
return auth
@autoparams()
def init_cache(_config: Config) -> Cache:
from app.handlers.cache import get_cache_handler, store_cache_handler, generate_cache_key_handler
# 初始化
cache_instance: Cache = Cache()
# 设置handler
cache_instance.set_get_cache_handler(get_cache_handler)
cache_instance.set_store_cache_handler(store_cache_handler)
cache_instance.set_generate_cache_key_handler(generate_cache_key_handler)
return cache_instance
@autoparams()
def init_migrate(_config: Config) -> Migration:
from app.handlers.migrate import init_data_handler
# 初始化
migration_instance: Migration = Migration(
database_uri=_config.SQLALCHEMY_DATABASE_URI, project=_config.PROJECT_NAME, workspace=_config.PROJECT_PATH
)
# 设置handler
migration_instance.set_do_init_data_handler(init_data_handler)
return migration_instance
def init_json_encoder() -> TemplateJSONEncoder:
return TemplateJSONEncoder()
def init_i18n() -> TemplateBabel:
# 获得翻译文件路径
translate_cfg_root = os.path.join(os.path.dirname(os.path.dirname(__file__)), "translations")
return TemplateBabel("messages", translate_cfg_root)
def init_pagination() -> Pagination:
pagination: Pagination = Pagination()
# 绑定handler
from app.handlers.pagination import get_page_paginate_params_handler, do_after_paginate_handler
pagination.set_get_page_paginate_params_handler(get_page_paginate_params_handler)
pagination.set_do_after_paginate_handler(do_after_paginate_handler)
return pagination
def init_apollo_client() -> ApolloClient:
from app.handlers.apollo import config_changed_handler
# 获取阿波罗配置中心的环境变量
apollo_config: ApolloClient = ApolloClient(
app_id=Config.APOLLO_APP_ID,
config_server_url=Config.APOLLO_CONFIG_SERVER_URL
)
apollo_config.set_config_changed_handler(config_changed_handler)
return apollo_config
@autoparams()
def init_main_db_session(config: Config):
from sqlalchemy import create_engine
from sqlalchemy.orm import (
sessionmaker,
scoped_session
)
engine = create_engine(config.SQLALCHEMY_DATABASE_URI, pool_recycle=3600, isolation_level='READ COMMITTED')
return scoped_session(sessionmaker(engine))
@autoparams()
def init_redis_session(config: Config) -> CacheRedis:
r: CacheRedis = StrictRedis.from_url(config.REDIS_URL)
return r
@autoparams()
def bind_config(apollo_config: ApolloClient) -> Config:
_fields: List[Field] = get_fields(Config)
config_dict: Dict[str, Any] = dict()
for _field in _fields:
_v: Any = apollo_config.get_value(_field.name)
if _v:
# 类型转换
config_dict[_field.name] = _field.type(_v)
_config = from_dict(Config, config_dict)
return _config
@autoparams()
def init_work_wechat(config: Config) -> WorkWeChat:
return WorkWeChat(corpid=config.CORP_ID, corpsecret=config.CORP_SECRET)
def bind(binder):
from app.tasks import init_celery
# Initialize Apollo
binder.bind_to_constructor(ApolloClient, init_apollo_client)
# Initialize the configuration
binder.bind_to_constructor(Config, bind_config)
# Initialize celery
binder.bind_to_constructor(Celery, init_celery)
# Initialize the main database
binder.bind_to_constructor(MainDBSession, init_main_db_session)
# Initialize redis
binder.bind_to_constructor(CacheRedis, init_redis_session)
# Initialize internationalization (i18n)
binder.bind_to_constructor(TemplateBabel, init_i18n)
# Add IDC (SSO) authentication
binder.bind_to_constructor(OAuth2SSO, init_oauth2_sso)
# Add the role-based auth plugin
binder.bind_to_constructor(Auth, init_auth)
# Add the pagination plugin
binder.bind_to_constructor(Pagination, init_pagination)
# Add the cache plugin
binder.bind_to_constructor(Cache, init_cache)
# Add the data-migration plugin
binder.bind_to_constructor(Migration, init_migrate)
# Add the JSON serialization encoder
binder.bind_to_constructor(TemplateJSONEncoder, init_json_encoder)
# Initialize WeCom (WeChat Work)
binder.bind_to_constructor(WorkWeChat, init_work_wechat)
```
#### File: app/handlers/cache.py
```python
import hashlib
import pickle
from typing import Any, Optional
from types import FunctionType
import inject
import template_logging
from app.dependencies import CacheRedis
logger = template_logging.getLogger(__name__)
redis_cache: CacheRedis = inject.instance(CacheRedis)
def get_cache_handler(key: str, timeout: Optional[int] = None, **_user_kwargs: Any) -> Any:
"""
Handler for reading a value from the cache.
"""
_value: Any = redis_cache.get(key)
# Refresh the cache TTL on read
if _user_kwargs.get('refresh_on_read', False) and timeout:
redis_cache.expire(key, timeout)
return _value
def store_cache_handler(key: str, value: Any, timeout: Optional[int] = None, **_user_kwargs: Any) -> Any:
"""
Handler for storing a value in the cache.
"""
redis_cache.set(key, value, timeout)
def generate_cache_key_handler(func: FunctionType, *args: Any, **kwargs: Any) -> str:
"""
Generate a unique key from a function and its arguments.
func: the function
args: positional arguments
kwargs: keyword arguments
"""
param_bytes: bytes = pickle.dumps((args, kwargs))
# Hash the parameters to keep the key from getting too long
cache_key: str = func.__module__ + '.' + func.__name__ + '.' + hashlib.md5(param_bytes).hexdigest()
return cache_key
```
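A small self-contained sketch of the key-generation idea used by ``generate_cache_key_handler``: pickling the call arguments and hashing the bytes keeps the cache key short and deterministic. The ``_demo`` function below is hypothetical.
```python
import hashlib
import pickle

def _demo(a, b=2):
    return a + b

def _key(func, *args, **kwargs):
    # Same construction as generate_cache_key_handler.
    digest = hashlib.md5(pickle.dumps((args, kwargs))).hexdigest()
    return f"{func.__module__}.{func.__name__}.{digest}"

assert _key(_demo, 1, b=2) == _key(_demo, 1, b=2)  # identical calls share a key
assert _key(_demo, 1, b=2) != _key(_demo, 2, b=2)  # different args get a different key
```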
#### File: app/resources/gold.py
```python
from typing import Any, Optional
import template_logging
from flask import Blueprint
from flask_restful import Resource, reqparse
from app.resources import Api
from app.models import GoldPrice
from app.services.gold import get_latest_price, get_current_price
logger = template_logging.getLogger(__name__)
class GoldPriceInfoList(Resource):
# http://www.pythondoc.com/Flask-RESTful/reqparse.html
# Common request parameters are defined here
common_parser = reqparse.RequestParser()
def get(self) -> Any:
"""
Get the list of recent gold prices.
"""
return get_latest_price()
class GoldPriceInfo(Resource):
# http://www.pythondoc.com/Flask-RESTful/reqparse.html
# Common request parameters are defined here
common_parser = reqparse.RequestParser()
def get(self) -> Optional[GoldPrice]:
"""
Get the current (latest) gold price.
"""
return get_current_price()
def get_resources():
blueprint = Blueprint('GoldPrice', __name__)
api = Api(blueprint)
api.add_resource(GoldPriceInfoList, '/list')
api.add_resource(GoldPriceInfo, '/latest')
return blueprint
```
#### File: app/services/gold.py
```python
from typing import Optional
import inject
import template_logging
from template_pagination import Pagination
from sqlalchemy.orm import Query
from sqlalchemy import desc
from template_transaction import CommitContext
from app.models import GoldPrice
from app.dependencies import MainDBSession
logger = template_logging.getLogger(__name__)
pagination: Pagination = inject.instance(Pagination)
"""
Schemas should not appear in the service layer.
Ideally, all parameter validation should be completed in the dataclass __post_init__ method.
"""
def get_current_price() -> Optional[GoldPrice]:
"""
Get the current gold price.
"""
session = inject.instance(MainDBSession)
with CommitContext(session):
gold_info: Optional[GoldPrice] = session.query(GoldPrice).order_by(desc(GoldPrice.time)).first()
return gold_info
@pagination.with_paginate()
def get_latest_price() -> Query:
"""
获得最近一段时间的黄金价格
"""
session = inject.instance(MainDBSession)
with CommitContext(session):
query: Query = session.query(GoldPrice)
return query
```
#### File: tasks/async_tasks/test_task.py
```python
import inject
import template_logging
from celery import Celery
from app.dependencies import Config
logger = template_logging.getLogger(__name__)
celery_app: Celery = inject.instance(Celery)
@celery_app.task(ignore_result=True, time_limit=600)
def do_test():
config: Config = inject.instance(Config)
logger.info(f'run do_test done, config is {config}')
```
#### File: app/tasks/__init__.py
```python
import json
import pkgutil
import importlib
from typing import List
from types import ModuleType
import inject
import template_logging
from celery.signals import task_postrun, task_prerun
from celery import Celery
from celery.schedules import crontab
from kombu import Exchange, Queue
from app.dependencies import Config
logger = template_logging.getLogger(__name__)
class CrontabEncoder(json.JSONEncoder):
"""
A JSON encoder that can serialize crontab objects.
"""
def default(self, obj):
if isinstance(obj, crontab):
return str(obj)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def get_sub_modules(_modules: List[str]) -> List[str]:
"""
Get the list of all submodules under the given modules.
_modules: list of modules to scan
"""
_sub_modules: List[str] = list()
# Scan all modules under each package
for _pkg in _modules:
_module: ModuleType = importlib.import_module(_pkg)
for _, _sub_module_name, _ in pkgutil.iter_modules(_module.__path__):
_sub_modules.append(f"{_pkg}.{_sub_module_name}")
return _sub_modules
@inject.autoparams()
def init_celery(config: Config):
# Root package for asynchronous tasks
async_task_root: str = 'app.tasks.async_tasks'
# Root package for scheduled tasks
schedule_task_root: str = 'app.tasks.schedule_tasks'
_celery = Celery(
config.PROJECT_NAME,
include=get_sub_modules([async_task_root, schedule_task_root])
)
# Scheduled (beat) tasks
beat_schedule = {
f'{schedule_task_root}.daily_do_task.test_scheduler': {
'task': f'{schedule_task_root}.daily_do_task.test_scheduler',
'args': (),
'schedule': 60,
'options': {
# This scheduled task will be dispatched to this queue
'queue': f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-beat-queue'
}
},
# Sync the gold price
f'{schedule_task_root}.gold_task.sync_gold_price': {
'task': f'{schedule_task_root}.gold_task.sync_gold_price',
'args': (),
'schedule': 5,
'options': {
# This scheduled task will be dispatched to this queue
'queue': f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-beat-queue'
}
},
# Gold price notifications
f'{schedule_task_root}.gold_task.gold_price_remind': {
'task': f'{schedule_task_root}.gold_task.gold_price_remind',
'args': (),
'schedule': 5,
'options': {
# This scheduled task will be dispatched to this queue
'queue': f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-beat-queue'
}
},
}
logger.info(
f'\n*********************************Scheduled tasks*********************************\n'
f'{json.dumps(beat_schedule, indent=4, cls=CrontabEncoder)}\n'
f'*********************************Scheduled tasks*********************************\n'
)
_celery.conf.update(
CELERYBEAT_SCHEDULE=beat_schedule,
# 定义队列[如果需要额外的队列,定义在这里]
CELERY_QUEUES=[
# 该默认队列可以不用定义,这里定义作为Example
Queue(
f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-queue',
# 交换机持久化
Exchange(f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-exchange', durable=True, delivery_mode=2),
routing_key=f'{config.PROJECT_NAME}-routing',
# 队列持久化
durable=True
),
# 定义定时任务队列
Queue(
f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-beat-queue',
Exchange(f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-exchange', durable=True, delivery_mode=2),
routing_key=f'{config.PROJECT_NAME}-beat-routing',
# 队列持久化
durable=True
),
],
# 定义路由[部分任务需要单独的队列处理用于提速,定义在这里]
CELERY_ROUTES={
# 该任务可以不用定义,这里定义作为Example
f'{async_task_root}.test_task.do_test': {
'queue': f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-queue',
'routing_key': f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-routing'
},
},
# 默认队列
CELERY_DEFAULT_QUEUE=f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-queue',
# 默认交换机
CELERY_DEFAULT_EXCHANGE=f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-exchange',
# 默认路由
CELERY_DEFAULT_ROUTING_KEY=f'{config.PROJECT_NAME}-{config.RUNTIME_ENV}-routing',
CELERY_DEFAULT_EXCHANGE_TYPE='direct',
# 任务持久化
CELERY_DEFAULT_DELIVERY_MODE="persistent",
BROKER_URL=config.CELERY_BROKER,
CELERY_RESULT_BACKEND=config.CELERY_BACKEND,
# 任务的硬超时时间
CELERYD_TASK_TIME_LIMIT=300,
CELERY_ACKS_LATE=True,
CELERY_RESULT_PERSISTENT=False,
# 默认忽略结果, 需要保存结果的任务需要手动定义ignore_result=True
CELERY_IGNORE_RESULT=True,
CELERY_TASK_RESULT_EXPIRES=86400,
# celery允许接收的数据格式
CELERY_ACCEPT_CONTENT=['pickle', 'json'],
# 异步任务的序列化器,也可以是json
CELERY_TASK_SERIALIZER='pickle',
# 任务结果的数据格式,也可以是json
CELERY_RESULT_SERIALIZER='pickle',
CELERY_TIMEZONE='Asia/Shanghai',
CELERY_ENABLE_UTC=True,
BROKER_CONNECTION_TIMEOUT=10,
# 拦截根日志配置
CELERYD_HIJACK_ROOT_LOGGER=False,
CELERYD_LOG_FORMAT='[%(name)s]:%(asctime)s:%(filename)s:%(lineno)d %(levelname)s/%(processName)s %(message)s',
        # REDBEAT_REDIS_URL lets celery beat run safely with multiple instances
redbeat_redis_url=config.REDBEAT_REDIS_URL or config.CELERY_BROKER,
        # redbeat lock key prefix
redbeat_key_prefix=f"{config.PROJECT_NAME}-{config.RUNTIME_ENV}-redbeat",
        # redbeat lock timeout
redbeat_lock_timeout=config.REDBEAT_LOCK_TIMEOUT
)
return _celery
@task_prerun.connect()
def task_prerun_handler(task_id, task, *args, **kwargs):
config: Config = inject.instance(Config)
logger.info(f'task_prerun_handler, config is {config}')
@task_postrun.connect()
def task_postrun_handler(*args, **kwargs):
from app.dependencies import MainDBSession
inject.instance(MainDBSession).remove()
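# --- Illustrative usage sketch (an assumption, not part of the original project) ---
# Once a Config instance is bound through `inject`, the app can be created and a task
# from the routing table above dispatched by name. The task path below is simply the
# example already referenced in CELERY_ROUTES.
def example_send_test_task():
    celery_app = init_celery()
    return celery_app.send_task(
        'app.tasks.async_tasks.test_task.do_test',
        args=(),
    )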
``` |
{
"source": "1995chen/python-common-libs",
"score": 3
} |
#### File: template_exception/template_exception/base.py
```python
class BaseLibraryException(Exception):
"""
    Base class for all library exceptions.
"""
def __init__(self, message=''):
super().__init__()
self.code = None
self.message = message
def __str__(self):
return str(self.message)
class SSOException(BaseLibraryException):
"""
    Base class for SSO exceptions.
"""
pass
class ClientException(BaseLibraryException):
"""
    Client-side error.
"""
def __init__(self, message=''):
super().__init__(message)
self.code = 4001
class ServerException(BaseLibraryException):
"""
    Server-side error.
"""
def __init__(self, message=''):
super().__init__(message)
self.code = 5001
class RemoteServerException(BaseLibraryException):
"""
    Remote server error.
"""
def __init__(self, message=''):
super().__init__(message)
self.code = 5001
```
#### File: template_exception/template_exception/rbac.py
```python
from typing import List
from .base import SSOException
class AuthorizedFailException(SSOException):
"""
    Authentication failed.
"""
pass
class TokenInvalidException(SSOException):
"""
    Invalid token.
"""
pass
class SSOServerException(SSOException):
"""
    SSO server error.
"""
pass
class UserResourceNotFoundException(SSOException):
"""
    User resource not found.
"""
pass
class PermissionsDenyException(SSOException):
"""
    Insufficient permissions.
"""
def __init__(self, user_roles: List[str], require_roles: List[str]):
message: str = f"need {require_roles}, but provide {user_roles}"
super().__init__(message)
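# Minimal usage sketch (an assumption, not part of the library): a role-check helper
# that raises PermissionsDenyException when any required role is missing.
def ensure_roles(user_roles: List[str], require_roles: List[str]) -> None:
    if not set(require_roles).issubset(set(user_roles)):
        raise PermissionsDenyException(user_roles, require_roles)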
```
#### File: template_logging/template_logging/handlers.py
```python
import os
import re
import tarfile
import time
import datetime
from logging.handlers import BaseRotatingHandler
class TemplateTimedRotatingFileHandler(BaseRotatingHandler):
def __init__(self, filename, backup_count=0, encoding=None, delay=False, utc=False):
self.utc = utc
self.suffix = "%Y-%m-%d"
self.baseFilename = os.path.abspath(filename)
self.currentFileName = self._compute_fn()
self.backup_count = backup_count
self.ext_match = re.compile(r"^\d{4}-\d{2}-\d{2}(\.\w+)?$", re.ASCII)
super(BaseRotatingHandler, self).__init__(filename, 'a', encoding, delay)
def shouldRollover(self, _record):
if self.currentFileName != self._compute_fn():
return True
return False
def _compute_fn(self):
if self.utc:
t = time.gmtime()
else:
t = time.localtime()
return self.baseFilename + "." + time.strftime(self.suffix, t)
def get_files_to_backup(self):
dir_name, base_name = os.path.split(self.baseFilename)
file_names = os.listdir(dir_name)
result = []
prefix = base_name + "."
plen = len(prefix)
for fileName in file_names:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.ext_match.match(suffix):
result.append(os.path.join(dir_name, fileName))
if len(result) < self.backup_count:
result = []
else:
result.sort()
result = result[:len(result) - self.backup_count]
return result
def clean_log_zip(self):
"""
        Remove compressed log archives older than 60 days.
:return:
"""
        # cutoff date: 60 days before today
clean_date = datetime.datetime.strptime(
datetime.datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d') - datetime.timedelta(days=60)
dir_name, base_name = os.path.split(self.baseFilename)
file_names = os.listdir(dir_name)
for fileName in file_names:
if fileName.endswith('.gz'):
_result = re.match(r'.*log.(.*)_.*', fileName)
if _result is None:
continue
_zip_date = datetime.datetime.strptime(_result.group(1).strip(), '%Y-%m-%d')
if clean_date > _zip_date:
os.remove(os.path.join(dir_name, fileName))
def doRollover(self):
        # clean up old compressed log archives
self.clean_log_zip()
if self.stream:
self.stream.close()
self.stream = None
self.currentFileName = self._compute_fn()
if self.backup_count > 0:
files_to_backup = self.get_files_to_backup()
if len(files_to_backup) >= self.backup_count:
file_dir_name = os.path.dirname(files_to_backup[0])
filename = os.path.basename(self.baseFilename)
tar_file_name = (
filename + '.' +
files_to_backup[0].split('.')[-1] +
'_' +
files_to_backup[self.backup_count - 1].split('.')[-1] +
'.tar.gz'
)
tar_file_path = os.path.join(file_dir_name, tar_file_name)
with tarfile.open(tar_file_path, 'w') as tar:
for log_file in files_to_backup[0:self.backup_count]:
tar.add(log_file, arcname=os.path.basename(log_file))
for log_file in files_to_backup[0:self.backup_count]:
os.remove(log_file)
def _open(self):
stream = open(self.currentFileName, self.mode, encoding=self.encoding)
if os.path.exists(self.baseFilename):
try:
os.remove(self.baseFilename)
except OSError:
pass
try:
os.symlink(os.path.basename(self.currentFileName), self.baseFilename)
except OSError:
pass
return stream
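# Usage sketch (an assumption, not part of the library): attach the handler to a
# logger; the configured path stays a symlink to the current dated file, and once
# backup_count dated files accumulate they are bundled into a .tar.gz archive.
def example_get_logger(log_path='./logs/app.log'):
    import logging
    os.makedirs(os.path.dirname(os.path.abspath(log_path)), exist_ok=True)
    handler = TemplateTimedRotatingFileHandler(log_path, backup_count=7)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger = logging.getLogger('example')
    logger.addHandler(handler)
    return logger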
```
#### File: template_migration/template_migration/migrate.py
```python
import os
import re
import logging
from typing import List, Optional, Callable
from sqlalchemy import func, create_engine
from sqlalchemy.orm import sessionmaker, scoped_session, Session
from template_transaction import CommitContext
from template_exception import HandlerUnCallableException
from .models import MigrationLog, Base
logger = logging.getLogger(__name__)
class MigrationSession(scoped_session, Session):
pass
class Migration:
"""
    Database migration runner for a service.
"""
def __init__(
self, database_uri: str, project: str, workspace: str,
isolation_level: Optional[str] = None
) -> None:
"""
        Initializer.
        database_uri: database connection URL
        project: project name
        workspace: workspace for migration scripts [scripts live under workspace/migrations/]
"""
# 创建engine
self.engine = create_engine(database_uri, pool_recycle=3600, isolation_level=isolation_level)
# 创建Session
self.session: Session = MigrationSession(sessionmaker(self.engine))
self.project = project
# 脚本存放路径
self.script_path = os.path.join(workspace, 'migrations')
# 自动创建目录
if not os.path.exists(self.script_path):
os.mkdir(self.script_path)
self.do_init_data_handler: Optional[Callable] = None
def set_do_init_data_handler(self, handler: Callable) -> None:
"""
        Register the handler that seeds initial data.
:param handler:
:return:
"""
if not callable(handler):
raise HandlerUnCallableException(f"{type(self).__name__}.set_do_init_data_handler")
self.do_init_data_handler = handler
def _generate_table(self) -> None:
"""
        Create all tables.
:return:
"""
Base.metadata.create_all(bind=self.engine)
logger.info("generate_table success")
def _drop_table(self) -> None:
"""
        Drop all tables.
:return:
"""
Base.metadata.drop_all(bind=self.engine)
logger.info("drop_table success")
def _init_data(self, *args, **kwargs) -> None:
"""
        Seed the service's base data.
"""
# 执行用户自定义handler
if callable(self.do_init_data_handler):
logger.info(f"call user defined init handler")
self.do_init_data_handler(*args, **kwargs)
# 添加migrate记录
_migrate = MigrationLog(version=0, script='sys_init', success=True, project=self.project)
self.session.add(_migrate)
self.session.commit()
# 添加migrate记录
self._add_migration_log()
logger.info(f"init_data success!")
def is_inited(self) -> bool:
"""
        Return whether the service has already been initialized.
"""
_migration_log: MigrationLog = self.session.query(MigrationLog).filter(
MigrationLog.project == self.project).first()
return True if _migration_log else False
def _add_migration_log(self) -> None:
"""
        This method only records migration log entries; it does not execute any scripts.
"""
_session: Session = self.session
_project: str = self.project
with CommitContext(_session):
            # after the database is (re)created, all existing migration scripts must be marked as already applied
regexp = re.compile(r'migration_prod_(\d+).py$')
max_version = _session.query(MigrationLog).filter_by(success=True, project=_project).with_entities(
func.max(MigrationLog.version)).first()[0]
max_version = max_version if max_version is not None else -1
matches = {}
migration_list: List[str] = os.listdir(self.script_path)
for f in migration_list:
match = regexp.match(f)
if match is not None:
matches[f] = int(match.group(1))
files = sorted([x for x in migration_list if matches.get(x) is not None], key=lambda x: matches[x])
for f in files:
version = matches[f]
if version > max_version:
# 保存数据
_migrate = MigrationLog(version=version, script=f, success=True, project=_project)
_session.add(_migrate)
_session.commit()
logger.info(f"success add migrate log")
def __execute_migration_scripts(self) -> None:
"""
        Execute pending migration scripts.
"""
_session: Session = self.session
_project: str = self.project
with CommitContext(_session):
            # find migration scripts that match the naming convention
regexp = re.compile(r'migration_prod_(\d+).py$')
failed_fp = '/tmp/migration.failed'
migrations_logs = _session.query(MigrationLog).filter(MigrationLog.project == _project).all()
            # versions that ran successfully
success_versions = sorted(set([_i.version for _i in filter(lambda _x: _x.success, migrations_logs)]))
            # versions that failed and never succeeded in a later run
fail_versions = sorted(
set([
_i.version
for _i in filter(
lambda _x: not _x.success and _x.version not in success_versions, migrations_logs
)])
)
            # highest version among successful migrations
max_success_version = -1
if success_versions:
max_success_version = max(success_versions)
            # mapping from migration file name to version number
matches = dict()
migration_file_list = os.listdir(self.script_path)
for f in migration_file_list:
match = regexp.match(f)
if match is not None:
matches[f] = int(match.group(1))
            # versions with no execution record yet
executed_versions = success_versions + fail_versions
no_executed_versions = sorted([v for v in matches.values() if v not in executed_versions])
logger.info('max successful version: %s' % str(max_success_version))
logger.info('successful versions: %s' % str(success_versions))
logger.info('failed versions: %s' % str(fail_versions))
logger.info('non-executed versions: %s' % str(no_executed_versions))
with open(failed_fp, 'w') as fp:
line = str(fail_versions)
fp.write(line)
files = sorted(filter(lambda x: matches.get(x) is not None, migration_file_list), key=lambda x: matches[x])
for f in files:
version = matches[f]
if version > max_success_version:
migrate_func = os.path.splitext(os.path.basename(f))[0]
# noinspection PyBroadException
try:
migrations = __import__(f'migrations.{migrate_func}')
migrations_prod = getattr(migrations, migrate_func)
migrations_prod.do()
success = True
except Exception as e:
logger.error(f"migration failed for {version}", exc_info=True)
success = False
raise e
finally:
# 保存数据
_migrate = MigrationLog(version=version, script=f, project=_project, success=success)
_session.add(_migrate)
_session.commit()
logger.info('Migrate successfully')
def do_migrate(self, *args, **kwargs) -> None:
"""
        Run the migration process.
        If the service has never been initialized, only the init script is called and no migration scripts are executed.
        The init script must therefore be kept up to date with every service release.
:return:
"""
# 创建表
self._generate_table()
# 判断服务是否初始化
_is_inited: bool = self.is_inited()
logger.info(f"init state is {_is_inited}")
if not _is_inited:
self._init_data(*args, **kwargs)
return
# 执行脚本
self.__execute_migration_scripts()
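# Usage sketch (an assumption, paths and names are illustrative): point the runner at a
# database and a workspace whose migrations/ directory contains migration_prod_<N>.py
# scripts, each exposing a do() function, then run it.
def example_run_migrations():
    migration = Migration(
        database_uri='sqlite:///example.db',
        project='example-project',
        workspace='.',
    )
    migration.set_do_init_data_handler(lambda: logger.info('seeding initial data'))
    migration.do_migrate()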
```
#### File: template_rbac/template_rbac/base.py
```python
import json
import time
import logging
from typing import Dict, Callable, Optional, Any
from uuid import uuid4
from dataclasses import dataclass
import jwt
from flask import Blueprint
from flask_restful import Api
from template_exception import (
HandlerUnCallableException, KeyParamsTypeInvalidException,
AuthorizedFailException, TokenInvalidException
)
from template_json_encoder import TemplateJSONEncoder
from .helpers import url_path_append, url_query_join
logger = logging.getLogger(__name__)
@dataclass
class ITokenInfo:
access_token: str
    # expiry time of the access token
expires_at: int
refresh_token: str
    # expiry time of the refresh token
refresh_expires_at: int
token_type: str
    # unique user id
user_id: str
    # username
username: str
    # e-mail
email: Optional[str]
    # full name
name: str
    # family name
family_name: str
    # given name
given_name: str
class SSOBase:
"""
    Only the response_type=code flow is supported.
"""
def __init__(
self, client_id: str, client_secret, app_root_url: str, after_login_redirect_url: Optional[str],
api_auth_path: str, api_logout_path: str, jwt_secret: str,
token_timeout: Optional[int] = None, debug_mode: bool = False
):
"""
        :param client_id: client id
        :param client_secret: client secret
        :param app_root_url: root URL of the app
        :param api_auth_path: authentication API path
        :param api_logout_path: logout API path
        :param jwt_secret: JWT secret
        :param token_timeout: JWT token timeout in seconds
        :param debug_mode: debug mode
"""
self.client_id = client_id
self.client_secret = client_secret
self.app_root_url = app_root_url
        # URL the frontend is redirected to after login [when frontend and backend URLs differ]
self.after_login_redirect_url = after_login_redirect_url or app_root_url
        # login path
self.api_auth_path = api_auth_path
        # logout path
self.api_logout_path = api_logout_path
self.jwt_secret = jwt_secret
self.token_timeout = token_timeout
self.debug_mode = debug_mode
self.response_type = 'code'
        # redirect URL [with redundant slashes removed]
self.redirect_uri = url_path_append(app_root_url, self.api_auth_path)
        # handler invoked on the payload before the JWT is generated
self.before_generate_jwt_handler: Optional[Callable] = None
        # handler invoked after authentication, right before redirecting
self.before_redirect_handler: Optional[Callable] = None
        # logout handler [this handler must be implemented]
self.logout_handler: Optional[Callable] = None
        # handler that builds an ITokenInfo from the fetched token info [called right after the token is obtained]
self.generate_token_info_handler: Optional[Callable] = None
def set_logout_handler(self, handler: Callable) -> None:
"""
        Register the handler.
        It is called right before logout.
:param handler:
:return:
"""
if not callable(handler):
raise HandlerUnCallableException(f"{type(self).__name__}.set_logout_handler")
self.logout_handler = handler
def set_before_generate_jwt_handler(self, handler: Callable) -> None:
"""
        Register the handler.
        It is called on the payload right before it is encoded into a JWT.
:param handler:
:return:
"""
if not callable(handler):
raise HandlerUnCallableException(f"{type(self).__name__}.set_generate_token_handler")
self.before_generate_jwt_handler = handler
def set_generate_token_info_handler(self, handler: Callable) -> None:
"""
        Register the handler.
        It builds an ITokenInfo from the raw access_token_info.
:param handler:
:return:
"""
if not callable(handler):
raise HandlerUnCallableException(f"{type(self).__name__}.set_generate_token_info_handler")
self.generate_token_info_handler = handler
def set_before_redirect_handler(self, handler: Callable) -> None:
"""
        Register the handler.
        It is called after authentication completes, right before redirecting.
:param handler:
:return:
"""
if not callable(handler):
raise HandlerUnCallableException(f"{type(self).__name__}.set_do_before_redirect_handler")
self.before_redirect_handler = handler
def _get_access_token_info_by_code(self, code: str) -> Dict[str, Any]:
"""
        Interface to be implemented by subclasses.
        Exchange the authorization code for the raw access token info.
        :param code: the received authorization code
:return: Dict[str, Any]
"""
raise NotImplementedError('_get_token_info_by_code')
def _default_generate_token_info(self, access_token_info: Dict[str, Any]) -> ITokenInfo:
"""
        Default implementation, to be provided by subclasses.
        Build an ITokenInfo from access_token_info.
"""
raise NotImplementedError('_default_generate_token_info')
def _generate_token_info(self, access_token_info: Dict[str, Any]) -> ITokenInfo:
"""
        Build an ITokenInfo from access_token_info.
"""
# 调用已经实现的handler
if callable(self.generate_token_info_handler):
return self.generate_token_info_handler(access_token_info)
# 默认实现
return self._default_generate_token_info(access_token_info)
def get_token_info_by_code(self, code: str) -> ITokenInfo:
"""
        Exchange an authorization code for an ITokenInfo.
        Implemented on top of the subclass hooks above.
        :param code: the received authorization code
:return: ITokenInfo
"""
# 根据code获取access_token_info
access_token_info: Dict[str, Any] = self._get_access_token_info_by_code(code)
# 根据access_token_info生成ITokenInfo
return self._generate_token_info(access_token_info)
def after_get_code(self, redirect_url: str, state: str, target_user: Optional[str], code: str) -> str:
"""
        Return the redirect URL with the JWT appended as a query parameter.
        :param redirect_url: overrides the default redirect URL
        :param state: state
        :param target_user: impersonate this user instead of the authenticated one
:param code:
:return:
"""
jwt_token: str = self.__generate_token_by_code(code=code, target_user=target_user)
# 重定向query string参数
args: Dict[str, str] = dict()
args['token'] = jwt_token
args['next'] = state
# 调用handler
self.__do_before_redirect_handler(args)
final_redirect_url: str = redirect_url or self.after_login_redirect_url
return url_query_join(final_redirect_url, **args)
def generate_token(self, token_info: ITokenInfo, expire: Optional[int] = None) -> str:
"""
        Encode token_info as the data section of a JWT.
        :param token_info:
        :param expire: timeout in seconds; falls back to the configured token_timeout, else 86400
:return:
"""
if not isinstance(token_info, ITokenInfo):
raise KeyParamsTypeInvalidException('token_info', ITokenInfo)
jwt_token_timeout: int = self.token_timeout or 86400
# 优先选取传入的超时时间
jwt_token_timeout = expire if isinstance(expire, int) else jwt_token_timeout
expires_at: int = jwt_token_timeout + int(time.time())
return self.__generate_token(token_info, expires_at)
def __generate_token(self, token_info: ITokenInfo, expires_at: int) -> str:
"""
        Low-level method shared by every token-generating path.
:param token_info:
:param expires_at:
:return:
"""
if not isinstance(expires_at, int):
raise KeyParamsTypeInvalidException('expire', int)
now_ts = int(time.time())
pay_load: Dict[str, Any] = dict(
iat=now_ts,
iss=self.app_root_url,
jti=str(uuid4())
)
# 添加data数据
pay_load['data'] = json.loads(json.dumps(token_info, default=TemplateJSONEncoder().default))
# 默认的超时时间
pay_load['exp'] = expires_at
# 调用handler
self.__before_generate_jwt_handler(pay_load)
logger.info(f"pay_load is {pay_load}, jwt_secret is {self.jwt_secret}")
jwt_token = jwt.encode(pay_load, self.jwt_secret, algorithm='HS256')
if isinstance(jwt_token, bytes):
jwt_token = jwt_token.decode('utf-8')
return jwt_token
def __before_generate_jwt_handler(self, pay_load: Dict[str, Any]) -> None:
"""
        Invoke the before-generate-JWT handler if one is registered.
:param pay_load:
:return:
"""
if callable(self.before_generate_jwt_handler):
logger.info(f"before call generate_token_handler(pay_load), pay_load: {pay_load}")
self.before_generate_jwt_handler(pay_load)
logger.info(f"after call generate_token_handler(pay_load), pay_load: {pay_load}")
def __generate_token_by_code(self, code: str, target_user: Optional[str]) -> str:
"""
        Generate a JWT from an authorization code.
:param code:
:return:
"""
# 获取用户信息
_token_data: ITokenInfo = self.get_token_info_by_code(code)
# 需要切换用户
if target_user:
_token_data.username = target_user
# 优先选取配置的超时时间
expires_at: int = _token_data.expires_at
if self.token_timeout:
expires_at = self.token_timeout + int(time.time())
return self.__generate_token(_token_data, expires_at)
def _refresh_token(self, refresh_token: str) -> ITokenInfo:
"""
        The provider-specific refresh-token call is implemented by subclasses.
"""
raise NotImplementedError('_refresh_token')
def refresh_token(self, jwt_token: str, refresh_token_handler: Optional[Callable] = None) -> str:
"""
        Refresh the user's token.
        :param jwt_token: the original JWT
        :param refresh_token_handler: optional handler invoked on the new payload
:return:
"""
try:
jwt_obj = jwt.decode(
jwt_token,
key=self.jwt_secret,
verify=True,
algorithms=['HS256'],
# 忽略超时, 会尝试refresh_token
options={"verify_exp": False},
)
except (jwt.InvalidSignatureError, Exception) as e:
logger.warning(f"decode jwt failed, token is {jwt_token}", exc_info=True)
raise AuthorizedFailException(str(e))
# 原始token的data内容
jwt_data: Dict[str, Any] = jwt_obj["data"]
# 基本的数据验证
if not (isinstance(jwt_data, dict) and jwt_data['access_token']):
logger.warning(f"invalid token {jwt_data}", exc_info=True)
raise TokenInvalidException()
# 获取当前时间
now_ts = int(time.time())
pay_load = {
'iat': now_ts,
'iss': self.app_root_url,
'jti': str(uuid4()),
}
# 定义飞书token信息
lark_token_data: Dict[str, Any]
# 刷新用户缓存
if now_ts >= jwt_data['expires_at']:
if 'refresh_expires_at' not in jwt_data or now_ts >= jwt_data['refresh_expires_at']:
raise AuthorizedFailException()
# 可以refresh
try:
logger.info(f"try to refresh token use refresh_token({jwt_data['refresh_token']})")
token_info: ITokenInfo = self._refresh_token(jwt_data['refresh_token'])
# 转化为字典
lark_token_data = json.loads(json.dumps(token_info, cls=TemplateJSONEncoder))
except Exception:
logger.warning(f"failed to refresh token use refresh_token({jwt_data['refresh_token']})")
raise AuthorizedFailException()
else:
# token未过期
lark_token_data = jwt_data
pay_load.update({
'data': lark_token_data,
'exp': lark_token_data['refresh_expires_at']
})
        # Invoke the handler. The refreshed token takes its username from the e-mail address,
        # so to impersonate another user override this handler and change the username there.
if callable(refresh_token_handler):
logger.info(f"before call refresh_token_handler, pay_load: {pay_load}")
refresh_token_handler(pay_load)
logger.info(f"after call refresh_token_handler, pay_load: {pay_load}")
logger.info(f"pay_load is {pay_load}, jwt_secret is {self.jwt_secret}")
jwt_token: str = jwt.encode(pay_load, self.jwt_secret, algorithm='HS256')
if isinstance(jwt_token, bytes):
jwt_token = jwt_token.decode('utf-8')
return jwt_token
def __do_before_redirect_handler(self, args: Dict[str, str]) -> None:
if callable(self.before_redirect_handler):
logger.info(f"before call do_before_redirect_handler(args), args: {args}")
self.before_redirect_handler(args)
logger.info(f"after call do_before_redirect_handler(args), args: {args}")
def do_logout_handler(self) -> Any:
"""
        Invoke the registered logout handler.
:return:
"""
if self.logout_handler is None:
logger.error(f"handler logout_handler not implement")
raise NotImplementedError('logout_handler')
if not callable(self.logout_handler):
raise HandlerUnCallableException('logout_handler')
logger.info('before call logout_handle()')
_logout_res: Any = self.logout_handler()
logger.info(f'after call logout_handler(), _logout_res is {_logout_res}')
return _logout_res
def get_resources(self) -> Blueprint:
from .apis import TemplateSSOLogin, TemplateSSOLogout
# 将inject实例注入类变量中
TemplateSSOLogin.template_rbac_cls = self.__class__
TemplateSSOLogout.template_rbac_cls = self.__class__
blueprint = Blueprint('TemplateSSO', __name__)
api = Api(blueprint)
api.add_resource(TemplateSSOLogin, self.api_auth_path)
api.add_resource(TemplateSSOLogout, self.api_logout_path)
return blueprint
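# Integration sketch (an assumption; `MyProviderSSO` is a hypothetical concrete subclass
# that implements _get_access_token_info_by_code, _refresh_token and
# _default_generate_token_info for a specific identity provider):
#
# sso = MyProviderSSO(
#     client_id='...', client_secret='...',
#     app_root_url='https://example.com', after_login_redirect_url=None,
#     api_auth_path='/api/login', api_logout_path='/api/logout',
#     jwt_secret='change-me',
# )
# sso.set_logout_handler(lambda: {'logout': True})
# flask_app.register_blueprint(sso.get_resources())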
```
#### File: python-common-libs/test/test_template_cache.py
```python
import os
import unittest
import pickle
import codecs
from typing import Optional, Any, Callable, Dict
import inject
import template_logging
from template_cache import Cache
# 创建日志目录
os.makedirs('./logs/', exist_ok=True)
template_logging.init_logger()
logger = template_logging.getLogger(__name__)
class TestTemplateCacheMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
        One-time setup shared by the whole test case.
"""
        # in-memory store backing the cache
cls.cache_store = dict()
def get_cache_handler(key: str, timeout: Optional[int] = None, **user_kwargs) -> Any:
"""
            Handler that reads a value from the cache store.
"""
return cls.cache_store.get(key, None)
def store_cache_handler(key: str, value: Any, timeout: Optional[int] = None, **user_kwargs) -> Any:
"""
            Handler that writes a value to the cache store.
"""
cls.cache_store[key] = value
def generate_cache_key_handler(func: Callable, *args: Any, **kwargs: Any) -> str:
"""
            Build the cache key for a function call.
"""
key_dict: Dict[str, Any] = {
'module': object.__getattribute__(func, '__module__'),
'func_name': func.__name__,
'args': args,
'kwargs': kwargs,
}
key: str = codecs.encode(pickle.dumps(key_dict), "base64").decode()
return key
def my_config(binder):
cache_instance: Cache = Cache()
# 设置handler
cache_instance.set_get_cache_handler(get_cache_handler)
cache_instance.set_store_cache_handler(store_cache_handler)
cache_instance.set_generate_cache_key_handler(generate_cache_key_handler)
binder.bind(Cache, cache_instance)
# 将实例绑定到inject
inject.configure(my_config)
@classmethod
def tearDownClass(cls):
# 清理
inject.clear()
cls.cache_store = None
def setUp(self):
self.passed: bool = False
self.cache: Optional[Cache] = inject.instance(Cache)
def tearDown(self):
# 销毁
self.cache = None
# 打印结果
logger.info(
f"func {self.__class__.__name__}.{self._testMethodName}.........{'passed' if self.passed else 'failed'}"
)
def test_unuse_cache(self):
@self.cache.use_cache()
def test_add_func(a: int, b: int):
"""
            A test function; its result will be cached by the decorator.
"""
return a + b
self.assertEqual(len(self.cache_store), 0)
self.assertEqual(test_add_func(1, 2), 3)
self.assertEqual(len(self.cache_store), 1)
self.passed = True
def test_use_cache(self):
# 自定义异常
class MyException(Exception):
pass
@self.cache.use_cache()
def test_add_func(a: int, b: int):
"""
            Raises, but the first call below is served from the cache populated by the previous test.
"""
raise MyException()
self.assertEqual(test_add_func(1, 2), 3)
# 清除缓存
self.cache_store.clear()
# 断言预期的异常
with self.assertRaises(MyException):
test_add_func(1, 2)
self.passed = True
if __name__ == '__main__':
unittest.main()
```
#### File: python-common-libs/test/test_template_transaction.py
```python
import os
import unittest
import shutil
from typing import Optional, Any
import inject
import template_logging
from sqlalchemy.orm import scoped_session, Session, sessionmaker
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from template_transaction import CommitContext, autocommit
# 创建日志目录
os.makedirs('./logs/', exist_ok=True)
template_logging.init_logger()
logger = template_logging.getLogger(__name__)
# 定义Base
Base = declarative_base()
class TestTransaction(Base):
"""
    Model used by the transaction tests.
"""
__tablename__ = 'test_transaction'
id = Column(Integer, primary_key=True, autoincrement=True, comment='主键')
value = Column(Integer)
class MainDBSession(scoped_session, Session):
pass
# 定义一个异常
class MyException(Exception):
pass
class TestTemplateTransactionMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
        One-time setup shared by the whole test case.
"""
cls.db_path: str = 'db'
cls.db_url = f"sqlite:///{cls.db_path}/unittest.db"
cls.engine = create_engine(cls.db_url, pool_recycle=3600)
# 清空数据库文件以及migration脚本目录
shutil.rmtree(cls.db_path, ignore_errors=True)
os.makedirs(cls.db_path, exist_ok=True)
def init_main_db_session():
return scoped_session(sessionmaker(cls.engine))
def my_config(binder):
binder.bind_to_constructor(MainDBSession, init_main_db_session)
# 将实例绑定到inject
inject.configure(my_config)
# 创建测试表
Base.metadata.create_all(bind=cls.engine)
logger.info("do init test data done")
@classmethod
def tearDownClass(cls):
cls.engine = None
# 清理
inject.clear()
db_path = object.__getattribute__(cls, 'db_path')
# 清空数据库文件以及migration脚本目录
shutil.rmtree(db_path, ignore_errors=True)
def setUp(self):
# 获得session
self.main_session: Optional[MainDBSession] = inject.instance(MainDBSession)
self.passed: bool = False
def tearDown(self):
# 打印结果
logger.info(
f"func {self.__class__.__name__}.{self._testMethodName}.........{'passed' if self.passed else 'failed'}"
)
def test_transaction_decorator(self):
"""
        Test the transaction decorator.
"""
@autocommit(MainDBSession)
def test_query() -> Any:
self.main_session.add(TestTransaction(id=2, value=4))
self.main_session.add(TestTransaction(id=3, value=2))
raise MyException()
# 创建一条测试数据
self.main_session.add(TestTransaction(id=1, value=5))
# 验证数据成功创建
result: TestTransaction = self.main_session.query(TestTransaction).filter(TestTransaction.id == 1).first()
self.assertEqual(result.id, 1)
self.assertEqual(result.value, 5)
# 断言预期的异常
with self.assertRaises(MyException):
test_query()
# 查询数据是否回滚
result: TestTransaction = self.main_session.query(TestTransaction).filter(TestTransaction.id == 2).first()
self.assertIsNone(result)
result: TestTransaction = self.main_session.query(TestTransaction).filter(TestTransaction.id == 3).first()
self.assertIsNone(result)
self.passed = True
def test_transaction_context(self):
"""
        Test the transaction context manager.
"""
def test_query() -> Any:
with CommitContext(self.main_session):
self.main_session.add(TestTransaction(id=5, value=9))
self.main_session.add(TestTransaction(id=6, value=6))
raise MyException()
# 创建一条测试数据
self.main_session.add(TestTransaction(id=4, value=6))
# 验证数据成功创建
result: TestTransaction = self.main_session.query(TestTransaction).filter(
TestTransaction.id == 4).first()
self.assertEqual(result.id, 4)
self.assertEqual(result.value, 6)
# 断言预期的异常
with self.assertRaises(MyException):
test_query()
# 查询数据是否回滚
result: TestTransaction = self.main_session.query(TestTransaction).filter(
TestTransaction.id == 5).first()
self.assertIsNone(result)
result: TestTransaction = self.main_session.query(TestTransaction).filter(
TestTransaction.id == 6).first()
self.assertIsNone(result)
self.passed = True
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1995subhankar1995/Machine_Learning_codes",
"score": 3
} |
#### File: 1995subhankar1995/Machine_Learning_codes/ANN from scratch.py
```python
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
import pandas as pd
# %matplotlib inline  # IPython magic from the original notebook; invalid in a plain .py script
np.random.seed(1) # set a seed so that the results are consistent
#import data
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:-1].values
y = dataset.iloc[:, -1].values
#Handling missing data
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent')
imputer.fit(X[:, 0:])
X[:, 0:] = imputer.transform(X[:, 0:])
#Handling categorial data
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
le = LabelEncoder()
X[:, 2] = le.fit_transform(X[:, 2])
ct = ColumnTransformer(transformers =[('encoder',OneHotEncoder(),[1])], remainder ='passthrough')
X = np.array(ct.fit_transform(X))
########## Splitting Data into Train and Test data ##########
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size = 0.2, random_state = 1)
######### Train data scaling ######################
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 0:] = sc.fit_transform(X_train[:, 0:])
X_test[:, 0:] = sc.transform(X_test[:, 0:])
X_train = X_train.T
Y_train = y_train.reshape(1, len(y_train))
X_test = X_test.T
Y_test = y_test.reshape(1, len(y_test))
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X_train.shape
shape_Y = Y_train.shape
m = X_train.shape[1] # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
### START CODE HERE ### (≈ 3 lines of code)
n_x = X.shape[0] # size of input layer
n_h = 4
n_y = Y.shape[0] # size of output layer
### END CODE HERE ###
return (n_x, n_h, n_y)
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
# print("initialize_parameters")
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def sigmoid(x):
return 1/(1 + np.exp(-(x.astype(float))))
def Tanh(x):
return (np.exp(2*x.astype(float)) - 1)/(np.exp(2*x.astype(float)) + 1)
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
# print("forward_propagation")
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
### END CODE HERE ###
# Implement Forward Propagation to calculate A2 (probabilities)
### START CODE HERE ### (≈ 4 lines of code)
Z1 = np.dot(W1, X) + b1
A1 = Tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = sigmoid(Z2)
### END CODE HERE ###
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
# print("compute_cost")
    m = Y.shape[1] # number of examples
# Compute the cross-entropy cost
### START CODE HERE ### (≈ 2 lines of code)
logprobs = np.log(A2)
cost = -1/m*np.sum(Y*logprobs + (1 - Y)*np.log(1 - A2))
### END CODE HERE ###
cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
# print("backward_propagation")
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters["W1"]
W2 = parameters["W2"]
### END CODE HERE ###
# Retrieve also A1 and A2 from dictionary "cache".
### START CODE HERE ### (≈ 2 lines of code)
A1 = cache["A1"]
A2 = cache["A2"]
### END CODE HERE ###
# Backward propagation: calculate dW1, db1, dW2, db2.
### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
dZ2 = A2 - Y
dW2 = 1/m * np.dot(dZ2, A1.T)
db2 = 1/m * np.sum(dZ2, axis = 1, keepdims = True)
dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
dW1 = 1/m * np.dot(dZ1, X.T)
db1 = 1/m * np.sum(dZ1, axis = 1, keepdims = True)
### END CODE HERE ###
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
### END CODE HERE ###
# print("update_parameters")
# Retrieve each gradient from the dictionary "grads"
### START CODE HERE ### (≈ 4 lines of code)
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
## END CODE HERE ###
# Update rule for each parameter
### START CODE HERE ### (≈ 4 lines of code)
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# Initialize parameters
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(X.shape[0], n_h, Y.shape[0])
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
### START CODE HERE ### (≈ 4 lines of code)
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = forward_propagation(X, parameters)
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = compute_cost(A2, Y, parameters)
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = backward_propagation(parameters, cache, X, Y)
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = update_parameters(parameters, grads, learning_rate = 1.2)
### END CODE HERE ###
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
# GRADED FUNCTION: predict
def predict(parameters, X):
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (≈ 2 lines of code)
A2, cache = forward_propagation(X, parameters)
predictions = np.multiply((A2 > 0.5), 1)
### END CODE HERE ###
return predictions
parameters = nn_model(X_train, Y_train, n_h = 4, num_iterations = 10000, print_cost=True)
predictions = predict(parameters, X_test)
#print(predictions)
print ('Accuracy: %d' % float((np.dot(Y_test,predictions.T) + np.dot(1-Y_test,1-predictions.T))/float(Y_test.size)*100) + '%')
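# Quick sanity check (an assumption, not part of the original script): the same two-layer
# network can be verified on a tiny XOR problem, which is not linearly separable and
# therefore needs the hidden layer.
X_xor = np.array([[0, 0, 1, 1],
                  [0, 1, 0, 1]])
Y_xor = np.array([[0, 1, 1, 0]])
xor_parameters = nn_model(X_xor, Y_xor, n_h=4, num_iterations=5000, print_cost=False)
print('XOR predictions:', predict(xor_parameters, X_xor))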
```
#### File: 1995subhankar1995/Machine_Learning_codes/Decision_tree_classification.py
```python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
########### Importing Dataset #######################
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
X_0 = X[np.where(y == 0)]
X_1 = X[np.where(y == 1)]
########## Splitting Data into Train and Test data ##########
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size = 0.2, random_state = 1)
######### Train data scaling ######################
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 0:] = sc.fit_transform(X_train[:, 0:])
X_test[:, 0:] = sc.transform(X_test[:, 0:])
############# Training model and prediction ############
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
pred_label = classifier.predict(X_test)
train_pred = classifier.predict(X_train)
def Accuracy(X, y_actual, y_pred):
matrix = confusion_matrix(y_actual, y_pred)
return (1/len(X))*sum(y_actual == y_pred)*100, matrix
test_accuracy, Confuse_mat = Accuracy(X_test, y_test, pred_label)
print("Test Accuracy:",test_accuracy,"%\nConfusion matrix:\n", Confuse_mat)
train_accuracy, Confuse_mat1 = Accuracy(X_train, y_train, train_pred)
print("Train Accuracy:",train_accuracy,"%\nConfusion matrix:\n", Confuse_mat1)
```
#### File: 1995subhankar1995/Machine_Learning_codes/K_nearest neighbor.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
########### Importing Dataset #######################
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
########## Splitting Data into Train and Test data ##########
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size = 0.2, random_state = 1)
def KNN(X_train, X_test, y_train, k):
D = np.zeros(len(X_train))
y_pred = np.zeros(len(X_test))
std= np.zeros(X_train.shape[1])
for i in range((X_train.shape[1])):
std[i] = np.std(X_train[:, i])
for i in range(len(X_train)):
temp = 0
for j in range(X_train.shape[1]):
temp +=((X_train[i, j]-X_test[j])**2)/(std[j])**2
D[i] = temp
arg_indices = (np.argsort(D))[0:k]
y_KNN = y_train[arg_indices]
return int((sum(y_KNN == 1) > sum(y_KNN == 0)))
def Accuracy(X, y_actual, y_pred):
matrix = confusion_matrix(y_actual, y_pred)
return (1/len(X))*sum(y_actual == y_pred)*100, matrix
k = 13
y_predict_train = np.zeros(len(X_train))
y_predict_test = np.zeros(len(X_test))
for i in range(len(X_train)):
y_predict_train[i] = KNN(X_train, X_train[i,:], y_train, k)
for i in range(len(X_test)):
y_predict_test[i] = KNN(X_train, X_test[i,:], y_train, k)
accuracy1, confuse1 = Accuracy(X_train, y_train, y_predict_train)
accuracy2, test_confuse = Accuracy(X_test, y_test, y_predict_test)
print("Train accuracy:", accuracy1,"%\n","Test accuracy:",
accuracy2, "%\n")
print("Train confusion matrix:\n",confuse1, "\n",
"Train confusion matrix:\n", test_confuse)
```
#### File: 1995subhankar1995/Machine_Learning_codes/SVM.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from qpsolvers import solve_qp
from sklearn.metrics import confusion_matrix
########### Importing Dataset #######################
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
for i in range(len(X)):
if(y[i] == 0):
y[i] = -1
########## Splitting Data into Train and Test data ##########
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size = 0.2, random_state = 1)
######### Train data scaling ######################
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 0:] = sc.fit_transform(X_train[:, 0:])
X_test[:, 0:] = sc.transform(X_test[:, 0:])
def Kernel(X, y):
K = np.zeros((len(X), len(X)))
for i in range(len(X)):
for j in range(len(X)):
K[i, j] = y[i]*np.dot(X[i, :], X[j, :])*y[j]
return K
def SVM_learner(X, y, C):
P = Kernel(X, y)
p1 = 1e-5*np.identity(len(X))
P = P + p1
q = -1*np.ones(len(X))
A = y;
A = A.reshape(1, len(X))
u = np.zeros(1)
h1 = C*np.ones(len(X))
h2 = np.zeros(len(X))
h = np.concatenate((h1, h2))
G1 = np.identity(len(X_train))
G2 = -1*np.identity(len(X_train))
G = np.concatenate((G1, G2))
alphas = solve_qp(P, q, G, h, A, u)
SV_indices = []
for i in range(len(X)):
if(alphas[i] >= 0.001):
SV_indices.append(i)
SV = X[SV_indices]
SV_labels = y[SV_indices]
SV_alphas = alphas[SV_indices]
W = np.zeros(X.shape[1])
for i in range(len(SV_alphas)):
W += SV_alphas[i]*SV_labels[i]*SV[i]
b = SV_labels[25] - np.dot(W, SV[25])
class model_struct:
pass
model = model_struct()
model.W = W
model.b = b
model.SV = SV
model.SV_labels = SV_labels
model.SV_alphas = SV_alphas
return model
def Prediction(X, model):
return np.sign(np.dot(X, model.W) + model.b)
def Accuracy(X, y_actual, y_pred):
matrix = confusion_matrix(y_actual, y_pred)
return (1/len(X))*sum(y_actual == y_pred)*100, matrix
C = 1
model = SVM_learner(X_train, y_train, C)
y_predict_train = Prediction(X_train, model)
y_predict_test = Prediction(X_test, model)
accuracy1, confuse1 = Accuracy(X_train, y_train, y_predict_train)
accuracy2, test_confuse = Accuracy(X_test, y_test, y_predict_test)
print("Train accuracy:", accuracy1,"%\n","Test accuracy:",
accuracy2, "%\n")
print("Train confusion matrix:\n",confuse1, "\n",
"Test confusion matrix:\n", test_confuse)
``` |
{
"source": "19961115/windows-yolov5",
"score": 3
} |
#### File: 19961115/windows-yolov5/excel.py
```python
import xlwt
# create the workbook and write the header row
def create_excel(file_name):
data = ["windows-close", "windows-open", "windows-half-open"]
workbook = xlwt.Workbook(encoding='utf-8')
ws = workbook.add_sheet("windows")
ws.write(0, 0, label=data[0])
ws.write(0, 1, label=data[1])
ws.write(0, 2, label=data[2])
ws.write(0,3,"图片名称")
ws.write(0,4,"窗户总数")
ws.write(0,5,"% windows-close")
ws.write(0,6,"% windows-open")
ws.write(0,7,"% windows-half-open")
workbook.save(file_name)
# set every cell of a given row to 0
def do_all_zero(i,file_name):
import xlrd
from xlutils.copy import copy
    # open the excel file to modify
old_excel = xlrd.open_workbook(file_name, formatting_info=True)
    # copy the read-only workbook into a writable one
new_excel = copy(old_excel)
    # get the first sheet
ws = new_excel.get_sheet(0)
    # write the data
ws.write(i, 0, 0)
ws.write(i,1,0)
ws.write(i,2,0)
ws.write(i, 3, 0)
ws.write(i, 4, 0)
ws.write(i, 5, 0)
ws.write(i, 6, 0)
ws.write(i, 7, 0)
new_excel.save(file_name)
# update a single cell, given the row, column, value and file name
def reator(i,j,num,file_name):
import xlrd
from xlutils.copy import copy
    # open the excel file to modify
old_excel = xlrd.open_workbook(file_name, formatting_info=True)
    # copy the read-only workbook into a writable one
new_excel = copy(old_excel)
    # get the first sheet
ws = new_excel.get_sheet(0)
    # write the data
ws.write(i, j, num)
new_excel.save(file_name)
# rename the image files in a folder to sequential numeric names
import os
def myrename(path):
file_list=os.listdir(path)
i=100
for fi in file_list:
old_name=os.path.join(path,fi)
new_name=os.path.join(path,str(i)+".jpg")
os.rename(old_name,new_name)
i+=1
#myrename("C:\\Users\\20170\\PycharmProjects\\students-inclassroom-monitoring-main\\exp\\")
# reduce the resolution of every image in a folder
def decline_pr():
import os
from PIL import Image
import glob
img_path = glob.glob("C:/Users/20170/PycharmProjects/students-inclassroom-monitoring-main/exp/*.jpg")
path_save = "C:/Users/20170/PycharmProjects/students-inclassroom-monitoring-main/exp1"
for file in img_path:
name = os.path.join(path_save, file)
im = Image.open(file)
im.thumbnail((1000, 1000))
print(im.format, im.size, im.mode)
im.save(name)
decline_pr()
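# Usage sketch (an assumption; the file name is arbitrary): create the workbook, then
# fill one data row. Column 3 holds the image name and columns 0-2 the per-class counts.
def example_fill_one_row():
    demo_file = 'windows_demo.xls'
    create_excel(demo_file)
    reator(1, 3, '100.jpg', demo_file)
    reator(1, 0, 2, demo_file)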
``` |
{
"source": "1997alireza/Autoencoding-Graph-for-Document-Clustering",
"score": 3
} |
#### File: LoNGAE/models/ae.py
```python
import numpy as np
from keras.layers import Input, Dense, Dropout, Lambda, Activation
from keras.models import Model
from keras import optimizers
from keras import backend as K
from ..layers.custom import DenseTied
def mvn(tensor):
"""Per row mean-variance normalization."""
epsilon = 1e-6
mean = K.mean(tensor, axis=1, keepdims=True)
std = K.std(tensor, axis=1, keepdims=True)
mvn = (tensor - mean) / (std + epsilon)
return mvn
def autoencoder_with_node_features(adj_row_length, features_length):
h = adj_row_length
w = adj_row_length + features_length
kwargs = dict(
use_bias=True,
kernel_initializer='glorot_normal',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
trainable=True,
)
data = Input(shape=(w,), dtype=np.float32, name='data')
noisy_data = Dropout(rate=0.5, name='drop1')(data)
### First set of encoding transformation ###
encoded = Dense(256, activation='relu',
name='encoded1', **kwargs)(noisy_data)
encoded = Lambda(mvn, name='mvn1')(encoded)
### Second set of encoding transformation ###
encoded = Dense(128, activation='relu',
name='encoded2', **kwargs)(encoded)
encoded = Lambda(mvn, name='mvn2')(encoded)
encoded = Dropout(rate=0.5, name='drop2')(encoded)
# the encoder model maps an input to its encoded representation
encoder = Model([data], encoded)
encoded1 = encoder.get_layer('encoded1')
encoded2 = encoder.get_layer('encoded2')
### First set of decoding transformation ###
decoded = DenseTied(256, tie_to=encoded2, transpose=True,
activation='relu', name='decoded2')(encoded)
decoded = Lambda(mvn, name='mvn3')(decoded)
### Second set of decoding transformation - reconstruction ###
decoded = DenseTied(w, tie_to=encoded1, transpose=True,
activation='linear', name='decoded1')(decoded)
# output related to node features
decoded_feats = Lambda(lambda x: x[:, h:],
name='decoded_feats')(decoded)
# output related to adjacency
decoded_adj_logits = Lambda(lambda x: x[:, :h],
name='decoded_adj_logits')(decoded)
decoded_adj = Activation(activation='sigmoid', name='decoded_adj')(decoded_adj_logits)
autoencoder = Model(
inputs=[data], outputs=[decoded_adj, decoded_feats]
)
# compile the autoencoder
adam = optimizers.Adam(lr=0.01, decay=0.0)
autoencoder.compile(
optimizer=adam,
loss={'decoded_adj': 'mean_squared_error',
'decoded_feats': 'mean_squared_error'},
loss_weights={'decoded_adj': 1.0, 'decoded_feats': 1.0}
)
return encoder, autoencoder
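# Usage sketch (an assumption, not part of the original module): for a graph with N nodes
# and F node features, each training row is the node's adjacency row concatenated with its
# feature vector; the model reconstructs both parts from the corrupted input.
def example_fit(adj, feats, epochs=5):
    data = np.hstack([adj, feats]).astype(np.float32)
    encoder, ae = autoencoder_with_node_features(adj.shape[1], feats.shape[1])
    ae.fit([data], [adj, feats], epochs=epochs, batch_size=8, verbose=0)
    return encoder.predict([data])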
```
#### File: src/processing/KCG.py
```python
from src.modelling.NMF_keyword_extraction import extract_top_keywords, THE_DUMMY_NODE
from src.utils.text import split_document
from src.processing.edge_weighting import sentence_similarity_edge
from src.modelling.SBERT_transformer import get_sentence_transformer
from src.utils.datasets import fetch_dataset
import paths
import numpy as np
import pickle
from src.utils.text import preprocess
from src.utils.datasets import name_of_dataset
def create_kcg(documents, big_graph, dataset_name=None):
"""
:param documents: list of documents, each document is taken as a string
:return: nodes (a list of nodes including{'keyword', 'feature'});
node['feature'] is the average of its sentences' SBERT embeddings,
and adjacency (a 2d numpy array containing weights of the graph's adjacency);
each weight is a float in the range [-1, 1]) computed based on sentence set similarity,
and doc_to_node_mapping which is a mapping from documnet index to its related nodes
"""
sentence_transformer = get_sentence_transformer()
documents_sentences = [split_document(doc) for doc in documents]
doc_to_node_mapping = [[] for _ in range(len(documents_sentences))]
# doc_to_node_mapping[i] is a list containing the indexes of the nodes related to the i-th document
keyword_sents = extract_top_keywords(documents_sentences, big_graph, dataset_name=dataset_name)
if THE_DUMMY_NODE in keyword_sents:
del keyword_sents[THE_DUMMY_NODE] # not considering the dummy node in the graph
nodes = [] # a list of {'keyword', 'feature'}
adjacency = np.zeros([len(keyword_sents), len(keyword_sents)], dtype=float)
for node_idx, keyword in enumerate(keyword_sents):
print('node {}/{}'.format(node_idx, len(keyword_sents)))
sentences_idx_tuple = keyword_sents[keyword]
embeddings_list = sentence_transformer.encode(
[documents_sentences[doc_idx][sent_idx] for doc_idx, sent_idx in sentences_idx_tuple])
average_embeddings = np.sum(embeddings_list, axis=0) / len(sentences_idx_tuple)
# node feature
nodes.append({'keyword': keyword, 'feature': average_embeddings})
# node adjacency vector
for other_node_idx, other_keyword in enumerate(keyword_sents):
print('--inner loop node {}/{}'.format(other_node_idx, len(keyword_sents)))
other_sentences_idx_tuple = keyword_sents[other_keyword]
other_embeddings_list = sentence_transformer.encode(
[documents_sentences[doc_idx][sent_idx] for doc_idx, sent_idx in other_sentences_idx_tuple])
edge_weight = sentence_similarity_edge(embeddings_list, other_embeddings_list)
adjacency[node_idx, other_node_idx] = edge_weight
for doc_idx, _ in sentences_idx_tuple:
doc_to_node_mapping[doc_idx].append(node_idx)
print('nodes\' features and adjacency vector are computed')
print('KCG is created')
return nodes, adjacency, doc_to_node_mapping
def get_documents_kcg(dataset_path, big_graph):
"""
:param dataset_path: paths.reuters_dataset or paths.the20news_dataset
:return: nodes, adjacency, doc_to_node_mapping; same as create_kcg function,
and documents_labels; a list of each document's label
"""
dataset_name = name_of_dataset(dataset_path)
if big_graph:
graph_file_path = paths.models + 'keyword_correlation_graph/big_' + dataset_name + '.pkl'
else:
graph_file_path = paths.models + 'keyword_correlation_graph/' + dataset_name + '.pkl'
data = fetch_dataset(dataset_path)
try:
nodes, adjacency, doc_to_node_mapping = pickle.load(open(graph_file_path, 'rb'))
documents_labels = data[:, 0]
print('KCG is loaded')
except FileNotFoundError:
documents_labels = data[:, 0]
documents = data[:, 1]
documents = [preprocess(doc) for doc in documents]
nodes, adjacency, doc_to_node_mapping = create_kcg(documents, big_graph, dataset_name)
pickle.dump((nodes, adjacency, doc_to_node_mapping), open(graph_file_path, 'wb'))
return nodes, adjacency, doc_to_node_mapping, documents_labels
``` |
{
"source": "1997alireza/Movie-Casting-Problems",
"score": 3
} |
#### File: src/experiments/actors_network.py
```python
import paths
import plotly.express as px
import pickle
from src.processing.GAE_on_actors import get_rating_predictor
import numpy as np
def weight_distribution():
# the distribution of edges' weight of the actors network
# the histogram plot is located in /docs/plots/actors network weights.png
graph_file_path = paths.models + 'actors_network/graph_{}_{}.pkl'.format(5, 2)
graph = pickle.load(open(graph_file_path, 'rb'))
graph_weights = []
for _, _, w in graph.edges.data("weight"):
graph_weights.append(w)
fig = px.histogram(graph_weights, x='graph weights')
fig.show()
def predicted_weight_distribution():
# the distribution of edges' weight of the predicted actors network by LoNGAE
# the histogram plot is located in /docs/plots/predicted actors network weights.png
bin_counts = [0] * int(1.0 / 0.005) # bins for the range [0., 1.] whose lengths is 0.005
model_rating_predictor, actors_id = get_rating_predictor()
for i, actor_id in enumerate(actors_id):
print('{}/{}'.format(i, len(actors_id)))
adjacency_vector = model_rating_predictor(actor_id)[0]
for w in adjacency_vector:
bin_number = int(w / 0.005)
bin_counts[bin_number] += 1
fig = px.bar(x=list(np.arange(0., 1., .005)), y=bin_counts, labels={'x': 'predicted graph weights', 'y': 'count'})
fig.show()
```
#### File: src/experiments/alternative_actor_suggestion.py
```python
from src.modelling.actors_network import parse_movie_cast
from src.processing.alternative_actor_suggestion.movie_sim_based_AAS import get_alternative_actors
from src.processing.GAE_on_actors import get_rating_predictor
from src.processing.alternative_actor_suggestion.GAE_based_AAS import find_alternates
from src.processing.movie_cast_rating import get_movie_cast_rating, movie_id, cast_group_rating
from src.utils.TM_dataset import get_actor_movies, get_cast, genres_of_movie
def compare_alternative_actor_algorithms_using_cast_rating():
"""Here we try to compute alternative actor for each actor using two algorithms
we provided: using movie similarity and vector space, then we try to compute score
of cast if th give actor is replaced by the alternative actor an compare values
Sample Experiments show 2nd algorithm is better"""
__, actors_id = get_rating_predictor()
for actor in actors_id:
        for movie in get_actor_movies(actor):
try:
                movie_genres = genres_of_movie(movie)
print("original score: " + str(get_movie_cast_rating(movie, 5)))
cast = parse_movie_cast(get_cast(movie), 5)
cast.remove(int(actor))
cast.append(get_alternative_actors(actor))
print("alg 1 score (graph neighbour based): " + str(cast_group_rating(cast, movie_genres)))
                cast = parse_movie_cast(get_cast(movie), 5)
cast.remove(int(actor))
cast.append(find_alternates(actor, 1))
print("alg 2 score (neural network based): " + str(cast_group_rating(cast, movie_genres)))
except Exception as e:
print(e)
if __name__ == '__main__':
compare_alternative_actor_algorithms_using_cast_rating()
```
#### File: src/experiments/movie_cast_rating.py
```python
from src.processing.movie_cast_rating import get_movie_cast_rating, movie_id, movie_name
import paths
import pandas as pd
import csv
import random
def top_cast_ratings():
top_cast_movies = ['The Big Lebowski', 'The Godfather', '12 Angry Men', 'The Departed', 'The Return of The King',
'The Dark Knight', 'Black Hawk Dawn', 'Inception', 'Pulp Fiction', 'American Hustle']
ratings = {}
for movie_name in top_cast_movies:
try:
rating = get_movie_cast_rating(movie_id(movie_name), 5)
ratings[movie_name] = rating
except:
pass
# result:
# The Big Lebowski: 0.6488287370278465
# The Godfather: 0.632729155711032
# 12 Angry Men: 0.6251690814605261
# The Departed: 0.6478975342809675
# Inception: 0.6398410374304397
# Pulp Fiction: 0.6077448804333317
# American Hustle: 0.6945717414391934
return ratings
def random_movies_cast_ratings():
from src.processing.movie_cast_rating import __credits
file_path = paths.logs + '1000_movie_cast_ratings.csv'
ratings_file = open(file_path, mode='w')
writer = csv.writer(ratings_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
movie_ids = list(__credits['id'])
random.shuffle(movie_ids)
movie_ids = movie_ids[:1100]
for movie_id in movie_ids:
try:
rating = get_movie_cast_rating(movie_id, 5)
writer.writerow([movie_name(str(movie_id)), str(rating)])
except Exception:
pass
ratings_file.close()
def descending_random_generated_ratings():
file_path = paths.logs + '1000_movie_cast_ratings.csv'
df = pd.read_csv(file_path, usecols=['name', 'cast_rating'])
sorted_ratings = []
for i in range(len(df)):
sorted_ratings.append(df['cast_rating'][i])
    return sorted_ratings[::-1]  # reverse the ascending list so it is descending
def top_casts_percentile():
"""showing the percentile of several high quality casts between a hge amount of random casts"""
top_ratings = top_cast_ratings()
sorted_random_ratings = descending_random_generated_ratings()
for movie_name in top_ratings:
top_r = top_ratings[movie_name]
for idx, r in enumerate([*sorted_random_ratings, float('inf')]):
if r > top_r:
percentile = int(idx / len(sorted_random_ratings) * 100)
print('{}: {}, percentile {}%'.format(movie_name, top_r, percentile))
break
# result:
# The Big Lebowski: 0.647287028451108, percentile 93%
# The Godfather: 0.6583803229268587, percentile 96%
# 12 Angry Men: 0.597728030674651, percentile 55%
# The Departed: 0.6616886311115655, percentile 96%
# Inception: 0.6664521220990737, percentile 97%
# Pulp Fiction: 0.5992585656312174, percentile 56%
# American Hustle: 0.6967548573250509, percentile 99%
```
#### File: experiments/visualization/actor_genre_radar_chart.py
```python
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from src.processing.GAE_on_actors import get_rating_predictor
from src.processing.movie_cast_rating import rating
from src.utils.TM_dataset import __top_genres_list, __top_genres_movies_count, actor_id
__, actors_id = get_rating_predictor()
actor_names = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>']
def get_actor_ratings(actor_id):
actor_data = []
for g_id, genre in enumerate(__top_genres_list):
actor_data.append(rating(actor_id, genre) / __top_genres_movies_count[g_id])
return actor_data
# def create_df():
# sample_count = 100
# data = []
# for a_idx, actor in enumerate(actors_id):
# if a_idx == sample_count:
# break
# actor_data = [actor_name(actor)]
# for g_id, genre in enumerate(__top_genres_list):
# actor_data.append(rating(actor, genre) / __top_genres_movies_count[g_id])
# data.append(actor_data)
# cols = ['Name'] + __top_genres_list
# df = pd.DataFrame(data, columns=cols)
# return df
def radar_chart():
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Actor-Genre Radar Chart'),
html.Div(children='''
        You can see how well an actor performs in each of the top genres and compare two actors.
'''),
html.Br(),
html.Div([dcc.Dropdown(
id='input-actor-a',
options=[{'label': actor_name, 'value': actor_name} for actor_name in actor_names],
value='<NAME>'
)],
style={'width': '28%', 'display': 'inline-block'}),
html.Div([dcc.Dropdown(
id='input-actor-b',
options=[{'label': actor_name, 'value': actor_name} for actor_name in actor_names],
value='<NAME>'
)],
style={'width': '28%', 'display': 'inline-block'}),
dcc.Graph(
id='radar-chart'
)
])
@app.callback(
Output('radar-chart', 'figure'),
Input('input-actor-a', 'value'),
Input('input-actor-b', 'value'))
def update_figure(input_actor_a, input_actor_b):
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=get_actor_ratings(actor_id(input_actor_a)),
theta=__top_genres_list,
fill='toself',
name=input_actor_a,
hoverinfo='skip'
))
fig.add_trace(go.Scatterpolar(
r=get_actor_ratings(actor_id(input_actor_b)),
theta=__top_genres_list,
fill='toself',
name=input_actor_b,
hoverinfo='skip'
))
fig.update_layout(
polar=dict(
radialaxis=dict(
visible=False,
# range=[0, 1]
)),
showlegend=True
)
return fig
app.run_server(debug=True, use_reloader=False)
if __name__ == '__main__':
radar_chart()
```
#### File: experiments/visualization/bipartite_graph.py
```python
import matplotlib.pyplot as plt
import networkx as nx
def draw_bipartite_graph(graph, first_node_set):
pos = nx.bipartite_layout(graph, first_node_set)
# get adjacency matrix
A = nx.adjacency_matrix(graph)
A = A.toarray()
# plot adjacency matrix
plt.imshow(A, cmap='Greys')
plt.show()
# plot graph visualisation
nx.draw(graph, pos, with_labels=False)
plt.show()
```
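A minimal usage sketch for the helper above; the toy actor/movie graph and the import path are assumptions for illustration, not part of the repository.
```python
import networkx as nx

from experiments.visualization.bipartite_graph import draw_bipartite_graph  # path assumed from the file header

# Toy bipartite graph: actors on one side, movies on the other (illustrative ids only).
actors = ["a1", "a2", "a3"]
movies = ["m1", "m2"]

graph = nx.Graph()
graph.add_nodes_from(actors, bipartite=0)
graph.add_nodes_from(movies, bipartite=1)
graph.add_edges_from([("a1", "m1"), ("a2", "m1"), ("a2", "m2"), ("a3", "m2")])

# First shows the adjacency matrix as an image, then the bipartite layout of the graph.
draw_bipartite_graph(graph, actors)
```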
#### File: src/modelling/actors_network.py
```python
import pandas as pd
import networkx as nx
import ast
import paths
from src.utils.TM_dataset import rating_of_movie, in_top_genres
from src.utils.mathematical import get_all_pairs
pd.set_option('display.max_colwidth', None)
__credits = pd.read_csv(paths.the_movies_dataset + '/credits.csv', usecols=['id', 'cast'])
__movies = pd.read_csv(paths.the_movies_dataset + '/movies_metadata.csv', usecols=['id', 'vote_average'])
def parse_movie_cast(cast, actor_depth):
movie_cast = []
res = ast.literal_eval(cast)
for j in range(min(len(res), actor_depth)):
movie_cast.append(res[j]['id'])
return movie_cast
def get_network(actor_depth, coacting_count_threshold):
"""
generating the undirected actors graph
each edge has three values:
{count: number of common movies, rating_sum: sum of common movies' normalized vote averages,
weight=rating_sum/common_count}
the edge's weight would be in the range [0, 1]
:param actor_depth: only the first actor_depth actors are considered in each movie
:param coacting_count_threshold: if two actors have at least coacting_count_threshold common movies they would have an edge
:return:
"""
actors_graph = nx.Graph()
for i in range(len(__credits)):
movie_id = __credits['id'][i]
# checking primary condition: only movies with ratings and top genres
if not in_top_genres(movie_id):
continue
try:
movie_rating = rating_of_movie(movie_id)
except Exception: # no rating has been found for the movie
continue
        movie_cast = parse_movie_cast(__credits['cast'][i], actor_depth)
# self-connection edges
for actor_id in movie_cast:
if actors_graph.has_edge(actor_id, actor_id):
common_count = actors_graph[actor_id][actor_id]['count'] + 1
rating_sum = actors_graph[actor_id][actor_id]['rating_sum'] + movie_rating
else:
common_count = 1
rating_sum = movie_rating
actors_graph.add_edge(actor_id, actor_id,
count=common_count, rating_sum=rating_sum, weight=rating_sum / common_count)
        edges = get_all_pairs(movie_cast)  # extracting all actor pairs (subsets of length 2)
for k in range(len(edges)):
if actors_graph.has_edge(edges[k][0], edges[k][1]):
common_count = actors_graph[edges[k][0]][edges[k][1]]['count'] + 1
rating_sum = actors_graph[edges[k][0]][edges[k][1]]['rating_sum'] + movie_rating
else:
common_count = 1
rating_sum = movie_rating
actors_graph.add_edge(edges[k][0], edges[k][1],
count=common_count, rating_sum=rating_sum, weight=rating_sum / common_count)
selected = [(u, v, d) for (u, v, d) in actors_graph.edges(data=True) if d['count'] >= coacting_count_threshold]
final_graph = nx.Graph()
final_graph.add_edges_from(selected)
# edges_list = []
# for u, v, d in final_graph.edges(data=True):
# edges_list.append((u, v, d))
print('Actors network has been created with {} nodes and {} edges'.format(
final_graph.number_of_nodes(), final_graph.number_of_edges()))
return final_graph
```
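To make the edge bookkeeping in `get_network` easier to follow, here is a small self-contained sketch of the same count/rating_sum/weight accumulation; the actor ids and ratings below are made up for illustration.
```python
import networkx as nx

# Toy data: (cast list, normalized movie rating) pairs; values are illustrative only.
movies = [
    ([1, 2], 0.8),
    ([1, 2, 3], 0.6),
    ([2, 3], 0.7),
]

g = nx.Graph()
for cast, rating in movies:
    # every unordered pair of co-actors gets a new edge or updates the existing one
    for i in range(len(cast)):
        for j in range(i + 1, len(cast)):
            u, v = cast[i], cast[j]
            if g.has_edge(u, v):
                count = g[u][v]['count'] + 1
                rating_sum = g[u][v]['rating_sum'] + rating
            else:
                count = 1
                rating_sum = rating
            g.add_edge(u, v, count=count, rating_sum=rating_sum, weight=rating_sum / count)

print(g[1][2])  # roughly {'count': 2, 'rating_sum': 1.4, 'weight': 0.7}
```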
#### File: src/processing/actors_network_features.py
```python
import paths
import pickle
import networkx as nx
from src.modelling.actors_network import get_network
from src.utils.TM_dataset import actors_rating_genre_based
def get_actors_network_features(actor_depth=5, coacting_count_threshold=2):
actors_adjacency, actors_id = __get_actors_network(actor_depth, coacting_count_threshold)
actors_feature = __get_actors_features(actors_id, actor_depth, coacting_count_threshold)
return actors_adjacency, actors_feature, actors_id
def __get_actors_network(actor_depth, coacting_count_threshold):
graph_file_path = paths.models + 'actors_network/graph_{}_{}.pkl'.format(actor_depth, coacting_count_threshold)
try: # loading the graph from disk if it has been saved before
graph = pickle.load(open(graph_file_path, 'rb'))
print('Actors graph has been loaded from disk')
except FileNotFoundError:
graph = get_network(actor_depth, coacting_count_threshold)
pickle.dump(graph, open(graph_file_path, 'wb'))
return nx.adjacency_matrix(graph).toarray(), list(graph.nodes)
def __get_actors_features(actors_id, actor_depth, coacting_count_threshold):
features_file_path = paths.models + 'actors_network/features_{}_{}.pkl'.format(actor_depth, coacting_count_threshold)
try: # loading the node features from disk if they have been saved before
actors_feature = pickle.load(open(features_file_path, 'rb'))
print('Actors features have been loaded from disk')
except FileNotFoundError:
actors_feature = actors_rating_genre_based(actors_id)
pickle.dump(actors_feature, open(features_file_path, 'wb'))
return actors_feature
```
#### File: src/processing/GAE_on_actors.py
```python
from src.modelling.LoNGAE.train_lp_with_feats import run
from src.processing.actors_network_features import get_actors_network_features
from src.utils.TM_dataset import actors_feature_balancing_weight
from datetime import datetime
import numpy as np
import paths
from tensorflow import keras
from src.modelling.LoNGAE.models.ae import autoencoder_with_node_features
def train():
    # training and validation logs for each epoch are saved in /docs/logs/ae training.txt
time_zero = datetime.now()
actors_adjacency, actors_feature, actors_id = get_actors_network_features()
node_features_weight = actors_feature_balancing_weight()
print('delta T: ', datetime.now() - time_zero)
time_zero = datetime.now()
run(actors_adjacency, actors_feature, node_features_weight, evaluate_lp=True)
print('delta T: ', datetime.now() - time_zero)
def load_encoder_model(file_path=paths.models+'actors_graph_ae/encoder.keras'):
try:
encoder = keras.models.load_model(file_path)
print('The encoder model has been loaded from disk')
except OSError:
raise Exception('The model must be trained beforehand using the train function')
return encoder
def load_autoencoder_model(adj, feats, node_features_weight,
file_path=paths.models + 'actors_graph_ae/autoencoder_weights.h5'):
try:
_, ae = autoencoder_with_node_features(adj.shape[1], feats.shape[1], node_features_weight)
ae.load_weights(file_path)
print('Weights of the autoencoder model have been loaded from disk')
except OSError:
raise Exception('the model must be trained beforehand using the train function')
return ae
def get_latent_vector_generator():
"""
create a function mapping from actor to the latent space
Note: the model must be trained beforehand
:return: the latent vectors generator of actors, and the list of actor ids
"""
encoder = load_encoder_model()
actors_adjacency, actors_feature, actors_id = get_actors_network_features()
def latent_vector_generator(actor_id):
"""
:param actor_id:
:return: a numpy array in shape (128,)
"""
try:
actor_idx = actors_id.index(actor_id)
except ValueError:
raise Exception('actor id is not found in the graph')
adj = actors_adjacency[actor_idx]
feat = actors_feature[actor_idx]
adj_aug = np.concatenate((adj, feat))
return encoder.predict(adj_aug.reshape(1, -1))[0] # prediction on one sample
return latent_vector_generator, actors_id
def get_rating_predictor():
"""
create a function mapping from actor to predicted ratings for its edges and genres
Note: the model must be trained beforehand
:return: the rating predictor of actors, and the list of actor ids
"""
actors_adjacency, actors_feature, actors_id = get_actors_network_features()
node_features_weight = actors_feature_balancing_weight()
ae = load_autoencoder_model(actors_adjacency, actors_feature, node_features_weight)
def rating_predictor(actor_id):
"""
:param actor_id:
:return: two 1d numpy arrays;
        first array is the predicted average ratings for its edges, with length equal to the number of nodes in the graph
        second array is the predicted average ratings for the actor in each genre
"""
try:
actor_idx = actors_id.index(actor_id)
except ValueError:
raise Exception('actor id is not found in the graph')
adj = actors_adjacency[actor_idx]
feat = actors_feature[actor_idx]
adj_aug = np.concatenate((adj, feat))
adj_aug_outs = ae.predict(adj_aug.reshape(1, -1)) # a list with length 2 [decoded_adj, decoded_feats]
adj_outs, feat_outs = adj_aug_outs[0], adj_aug_outs[1]
adj_out, feat_out = adj_outs[0], feat_outs[0] # prediction on one sample
return adj_out, feat_out
return rating_predictor, actors_id
def target_actor_weight(target_actor_id, actors_id, predicted_weights):
"""Co-Star Rating:
the predicted weight between two actors is considered as the measure showing how the actors match to be co-stars in a movie"""
try:
target_idx = actors_id.index(target_actor_id)
    except ValueError:
raise Exception('Actor id {} is not found in the graph'.format(target_actor_id))
return predicted_weights[target_idx]
class LinkWeightPredictor:
def __init__(self):
self.predictor, self.actors_id = get_rating_predictor()
def predict(self, actor_a, actor_b):
"""
It is symmetrical with respect to two actors
:param actor_a: first actor id
:param actor_b: second actor id
:return: undirected link weight between given actors
"""
ratings_a, _ = self.predictor(actor_a)
a2b_weight = target_actor_weight(actor_b, self.actors_id, ratings_a)
ratings_b, _ = self.predictor(actor_b)
b2a_weight = target_actor_weight(actor_a, self.actors_id, ratings_b)
return (a2b_weight + b2a_weight) / 2
if __name__ == '__main__':
# train()
# exit()
rating_predictor, actors_id = get_rating_predictor()
edges_weights, genres_weights = rating_predictor(actors_id[0])
print(edges_weights[0])
print(target_actor_weight(35, actors_id, edges_weights))
print(genres_weights)
exit()
latent_vector_generator, actors_id = get_latent_vector_generator()
print(latent_vector_generator(actors_id[0]))
```
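A hypothetical usage sketch for the co-star predictor defined above; it assumes the autoencoder has already been trained via `train()` and that the dataset files are available on disk.
```python
from src.processing.GAE_on_actors import LinkWeightPredictor

predictor = LinkWeightPredictor()

# Pick two actor ids that are known to exist in the actors graph.
actor_a, actor_b = predictor.actors_id[0], predictor.actors_id[1]

# Symmetric predicted co-star weight between the two actors.
print(predictor.predict(actor_a, actor_b))
```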
#### File: src/utils/TM_dataset.py
```python
import ast
import pandas as pd
import paths
import numpy as np
from sklearn.preprocessing import normalize
from math import isnan
__links_df = pd.read_csv(paths.the_movies_dataset + 'links.csv', usecols=['tmdbId', 'movieId'])
__ratings_df = pd.read_csv(paths.the_movies_dataset + 'ratings.csv', usecols=['movieId', 'rating'])
__credits = pd.read_csv(paths.the_movies_dataset + 'credits.csv', usecols=['id', 'cast']) # this id is tmdb_id
__movies = pd.read_csv(paths.the_movies_dataset + 'movies_metadata.csv',
usecols=['id', 'genres', 'vote_average']) # this id is tmdb_id
__actors_id_name = {}
__actors_name_id = {}
__actors_movie_count = {}
__actor_movies = {}
# list of the genres with high number of movies in the movies dataset
__top_genres_list = ['Drama', 'Comedy', 'Thriller', 'Romance', 'Action', 'Horror', 'Crime', 'Documentary', 'Adventure',
'Science Fiction', 'Family', 'Mystery', 'Fantasy', 'Animation', 'Foreign', 'Music', 'History',
'War', 'Western', 'TV Movie']
__top_genres_movies_count = [20265, 13182, 7624, 6735, 6596, 4673, 4307, 3932, 3496, 3049, 2770, 2467, 2313, 1935, 1622,
1598, 1398, 1323, 1042, 767]
# def rating_of_movie(tmdb_id):
# """
# ratings.movieId = links.movieId; so we need to find the related moveId for the input tmdbId using links data
# :param tmdb_id: it's equal to links.tmdbId or movies_metadata.id, and credits.id as well
# :return: the average of the ratings. if there is not any rating available for that movie, it returns NaN
# """
# global __links_df, __ratings_df
#
# try:
# movie_id = __links_df[__links_df.tmdbId == tmdb_id].iloc[0].movieId
# except IndexError:
# raise Exception('tmdb id {} is not found'.format(tmdb_id))
# ratings = __ratings_df[__ratings_df.movieId == movie_id].rating
#     if not len(ratings):
# raise Exception('no rating has been found for the movie {}'.format(tmdb_id))
#
# return __ratings_df[__ratings_df.movieId == movie_id].rating.mean()
def rating_of_movie(tmdb_id):
"""
:param tmdb_id: an integer which is equal to links.tmdbId or movies_metadata.id, and credits.id as well
:return: normalized average vote (movies_metadata.vote_average/10)
"""
global __movies
try:
normalized_rating = __movies[__movies['id'] == str(tmdb_id)].iloc[0]['vote_average'] / 10
if normalized_rating == 0.0 or isnan(normalized_rating):
raise Exception('the movie {} is not rated'.format(tmdb_id))
else:
return normalized_rating
except Exception:
raise Exception('the movie {} is not rated'.format(tmdb_id))
# TODO: why is the extracted vote_average value for movie tmdb_id=82663 NaN, even though it appears to have a float value in the dataset?
def genres_of_movie(tmdb_id):
"""
:param tmdb_id:
:return: list of related genres to the movie
"""
global __movies
tmdb_id = str(tmdb_id)
try:
genres = eval(__movies[__movies.id == tmdb_id].iloc[0].genres)
except IndexError:
raise Exception('tmdb id {} is not found'.format(tmdb_id))
genres = [g['name'] for g in genres]
return genres
def in_top_genres(tmdb_id):
genres = genres_of_movie(tmdb_id)
for g in genres:
if g in __top_genres_list:
return True
return False
# def actor_rating_genre_based(actor_id):
# """
#
# :param actor_id: it's equal to credits.cast.id
# :return: normalized actor's feature based on its ratings in top genres (__genres_list), as a numpy array
# """
# actor_id = int(actor_id)
# genres = dict((g, 0) for g in __top_genres_list)
# for i in range(len(__credits)):
# casts_id = [c['id'] for c in eval(__credits['cast'][i])]
# if actor_id in casts_id:
# movie_id = __credits['id'][i]
# movie_genres = genres_of_movie(movie_id)
#
# try:
# movie_rating = rating_of_movie(movie_id)
# except Exception: # no rating has been found for the movie
# continue
#
# for g in movie_genres:
# if g in genres:
# genres[g] += movie_rating
#
# feature = np.array(list(genres.values()))
# total = sum(feature)
#
# if total == 0:
# raise Exception('actor id {} is not found in any rated movie\'s casts from top genres.'.format(actor_id))
#
# return feature / total
def actors_rating_genre_based(actor_ids):
"""
more efficient than actor_rating_genre_based. here we read the dataset just once.
:param actor_ids: a list of actor_id which is equal to credits.cast.id
:return: list of normalized actors' features based on their ratings in top genres (__genres_list), as a 2d numpy array
"""
features = np.zeros((len(actor_ids), len(__top_genres_list)))
for i in range(len(__credits)):
movie_id = __credits['id'][i]
movie_genres = genres_of_movie(movie_id)
genres_idx = []
for g in movie_genres:
try:
g_idx = __top_genres_list.index(g)
genres_idx.append(g_idx)
except ValueError:
continue
if len(genres_idx) == 0: # the movie is not in any top genre
continue
try:
movie_rating = rating_of_movie(movie_id)
except Exception: # no rating has been found for this movie
continue
casts_id = [c['id'] for c in eval(__credits['cast'][i])]
for cast_id in casts_id:
try:
a_idx = actor_ids.index(cast_id)
except ValueError:
continue
features[a_idx][genres_idx] += movie_rating
print('Actors features are extracted')
return normalize(features)
def actors_feature_balancing_weight():
"""
if a feature is more popular and has a higher value in total, its weight is lower
:return:
"""
return np.array([1 / c for c in __top_genres_movies_count])
def prepare_actors():
"""
creates a global cache of actor and movie relations using python dictionary
:return:
"""
global __credits, __actors_movie_count, __actor_movies
if (not len(__actors_id_name)):
for i in range(len(__credits)):
cast = ast.literal_eval(__credits['cast'][i])
for j in range(len(cast)):
                if cast[j]['id'] in __actor_movies:
                    __actor_movies[cast[j]['id']].append(int(__credits['id'][i]))
                else:
                    __actor_movies[cast[j]['id']] = [int(__credits['id'][i])]
__actors_id_name[cast[j]['id']] = [cast[j]['name']]
__actors_name_id[cast[j]['name']] = [cast[j]['id']]
if cast[j]['id'] in __actors_movie_count:
__actors_movie_count[cast[j]['id']] = __actors_movie_count[cast[j]['id']] + 1
else:
__actors_movie_count[cast[j]['id']] = 1
def actor_name(actor_id):
"""
:param actor_id: an integer which is equal to credits.cast.id
:return: credits.cast.name
"""
prepare_actors()
return __actors_id_name[actor_id][0]
def actor_id(actor_name):
"""
    :param actor_name: credits.cast.name
    :return: an integer which is equal to credits.cast.id
"""
prepare_actors()
return __actors_name_id[actor_name][0]
def get_top_actors(n):
"""
calculates n most active actors
:param n: number of requested actors
:return: list of n actors with most movies played
"""
prepare_actors()
sorted_actors = dict(sorted(__actors_movie_count.items(), key=lambda item: item[1], reverse=True))
i = 0
result = []
for actor in sorted_actors:
i += 1
result.append(actor)
        if i >= n:
            break
    return result
def get_genre_index(genre):
global __top_genres_list
return __top_genres_list.index(genre)
def get_actor_movies(actor):
"""
    Return all movies played by the given actor.
"""
prepare_actors()
return __actor_movies[int(actor)]
def get_cast(movie_id):
global __credits
return __credits[__credits['id'] == movie_id]['cast'].values.tolist()[0]
``` |
{
"source": "1997alireza/Optimization-Homework",
"score": 3
} |
#### File: Optimization-Homework/HW3/function_provider.py
```python
import numpy as np
from numpy import matmul
class FunctionProvider:
def __init__(self, f, g, h):
self.f = f
self.grad = g
self.hessian = h
def __add__(self, other):
f = lambda x: self.f(x) + other.f(x)
g = lambda x: self.grad(x) + other.grad(x)
h = lambda x: self.hessian(x) + other.hessian(x)
return FunctionProvider(f, g, h)
def __mul__(self, other):
f = lambda x: self.f(x) * other
g = lambda x: self.grad(x) * other
h = lambda x: self.hessian(x) * other
return FunctionProvider(f, g, h)
def __rmul__(self, other):
return self.__mul__(other)
class QuadraticFunctionProvider(FunctionProvider):
"""
1/2 x^𝑇 A x - b^𝑇 x
"""
def __init__(self, A, b):
b = np.reshape(b, [-1, 1])
assert np.shape(b)[0] == np.shape(A)[0] == np.shape(A)[1]
f = lambda x: 0.5 * matmul(matmul(x.T, A), x) - matmul(b.T, x)
g = lambda x: matmul(A, x) - b
h = lambda x: A
FunctionProvider.__init__(self, f, g, h)
``` |
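A short usage sketch of the providers above; the matrix, vector and import path are illustrative assumptions, while the printed values follow directly from the quadratic formula.
```python
import numpy as np

from function_provider import QuadraticFunctionProvider  # import path assumed

A = np.array([[2.0, 0.0], [0.0, 4.0]])
b = np.array([1.0, 1.0])
quad = QuadraticFunctionProvider(A, b)

x = np.array([[1.0], [1.0]])  # column vector, matching the provider's matmul convention
print(quad.f(x))              # 0.5 * x^T A x - b^T x = 3 - 2 -> [[1.]]
print(quad.grad(x).ravel())   # A x - b -> [1. 3.]

# Providers compose: 2 * quad doubles the value, gradient and Hessian.
scaled = 2 * quad
print(scaled.f(x))            # [[2.]]
```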
{
"source": "1997alireza/Optimization",
"score": 3
} |
#### File: Optimization/HW2/functions.py
```python
import numpy as np
class RosenbrockProvider:
"""
    Rosenbrock function and its gradient and Hessian matrix
"""
@staticmethod
def f(x1, x2):
return 100 * (x2 - x1 ** 2) ** 2 + (1 - x1) ** 2
@staticmethod
def grad(x1, x2):
g1 = -400 * (x2 - x1 ** 2) * x1 + 2 * (x1 - 1)
g2 = 200 * (x2 - x1 ** 2)
return np.array([g1, g2])
@staticmethod
def hessian(x1, x2):
return np.array([
[400 * (3 * x1 ** 2 - x2) + 2, -400 * x1],
[-400 * x1, 200]
])
class LeastSquares:
"""
    Least Squares function and its gradient and Hessian matrix
"""
def __init__(self, A, b):
A_shape = np.shape(A)
b_shape = np.shape(b)
print(A_shape, b_shape)
assert len(b_shape) == 1 and len(A_shape) == 2 and b_shape[0] == A_shape[0] == A_shape[1]
self.A = A
self.b = b
def f(self, *x):
return sum((np.dot(self.A, x) - self.b)**2)
def grad(self, *x):
ax_b = np.dot(self.A, x) - self.b
return 2 * np.dot(self.A.T, ax_b)
def hessian(self, *x):
return 2 * np.dot(self.A.T, self.A)
```
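A quick sanity check of the providers above at points where the expected values are easy to verify by hand; the import path and the small least-squares system are assumptions made for illustration.
```python
import numpy as np

from functions import LeastSquares, RosenbrockProvider  # import path assumed

# The Rosenbrock function has its global minimum at (1, 1).
print(RosenbrockProvider.f(1.0, 1.0))        # 0.0
print(RosenbrockProvider.grad(1.0, 1.0))     # [0. 0.]
print(RosenbrockProvider.hessian(1.0, 1.0))  # [[ 802. -400.] [-400.  200.]]

# A tiny square system solved exactly by x = (1, 3).
A = np.array([[2.0, 0.0], [0.0, 1.0]])
b = np.array([2.0, 3.0])
ls = LeastSquares(A, b)
print(ls.f(1.0, 3.0))     # ||Ax - b||^2 = 0.0
print(ls.grad(1.0, 3.0))  # [0. 0.]
```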
#### File: Optimization/HW2/tools.py
```python
def norm_p(x, p=2):
assert p >= 1
    return sum(abs(x) ** p) ** (1 / p)  # absolute values make the p-norm correct for negative entries
```
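Two quick checks of `norm_p` above; the 3-4-5 example makes the default Euclidean case easy to verify, and the import path is an assumption.
```python
import numpy as np

from tools import norm_p  # import path assumed

print(norm_p(np.array([3.0, 4.0])))        # 5.0, the Euclidean norm
print(norm_p(np.array([1.0, -1.0]), p=1))  # 2.0, the Manhattan norm
```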
#### File: Optimization/HW3/linear_conjugate_gradient.py
```python
import numpy as np
from numpy import matmul
from numpy.linalg import norm
import time
class LinearConjugateGradient:
def __init__(self, A, b, x=None):
A = np.array(A)
b = np.array(b)
b = np.reshape(b, (-1, 1))
assert np.shape(b)[0] == np.shape(A)[0]
        if x is None:
x = np.random.rand(np.shape(A)[1], 1)
else:
x = np.reshape(x, (-1, 1))
assert np.shape(x)[0] == np.shape(A)[1]
self.A = A
self.b = b
self.x = x
def residual(self):
return np.matmul(self.A, self.x) - self.b
_MAX_ITER, _TOLERANCE = 100000, .00000001
def solve(self):
r = self.residual()
p = -r
A = self.A
b = self.b
x = self.x
k = 0
time_start_sec = time.time()
while k <= self._MAX_ITER and norm(r) > self._TOLERANCE:
pAp = matmul(matmul(p.T, A), p)
alpha = - matmul(r.T, p) / pAp
x += alpha * p
r = self.residual()
beta = matmul(matmul(r.T, A), p) / pAp
p = -r + beta * p
k += 1
time_end_sec = time.time()
return x, (time_end_sec - time_start_sec)
``` |
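A small usage sketch with a symmetric positive-definite system whose exact solution is x = (1/11, 7/11); the system and import path are illustrative assumptions.
```python
import numpy as np

from linear_conjugate_gradient import LinearConjugateGradient  # import path assumed

A = [[4.0, 1.0], [1.0, 3.0]]
b = [1.0, 2.0]

solver = LinearConjugateGradient(A, b)
x, elapsed = solver.solve()

print(x.ravel())                                # approximately [0.0909 0.6364]
print(np.allclose(np.matmul(A, x).ravel(), b))  # True, up to the solver tolerance
print('solved in {:.4f} seconds'.format(elapsed))
```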
{
"source": "1997alireza/Persian-Telegram-WordCloud",
"score": 2
} |
#### File: Persian-Telegram-WordCloud/telegram_crawler/connector.py
```python
from telethon import TelegramClient
import os
class Connector:
def __init__(self, api_id, api_hash, phone_number):
super().__init__()
self.client = TelegramClient(os.path.dirname(os.path.realpath(__file__)) + '/sessions/' + phone_number, api_id,
api_hash)
self.client.start(phone=phone_number)
```
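A hypothetical usage sketch; the credentials below are placeholders you would obtain from https://my.telegram.org, not real values.
```python
from telegram_crawler.connector import Connector  # import path assumed

API_ID = 123456                # placeholder
API_HASH = 'your-api-hash'     # placeholder
PHONE_NUMBER = '+15551234567'  # placeholder

connector = Connector(API_ID, API_HASH, PHONE_NUMBER)
# connector.client is now a started Telethon TelegramClient that the Crawler below can reuse.
```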
#### File: Persian-Telegram-WordCloud/telegram_crawler/crawler.py
```python
from telethon import TelegramClient
from telethon.tl.types import Dialog
class Crawler:
def __init__(self, dialog: Dialog, client: TelegramClient, target_entity_id, max_messages_count,
ignore_forwarded_messages):
super().__init__()
self.dialog = dialog
self.client = client
self.target_entity_id = target_entity_id
self.max_messages_count = max_messages_count
self.ignore_forwarded_messages = ignore_forwarded_messages
def extract_messages_body(self):
messages_text = []
chat_name = self.dialog.name
print('Chat Name: ', chat_name)
i = 0
for message in self.client.iter_messages(self.dialog, limit=None):
if 0 <= self.max_messages_count <= i: # if it's lower than zero, continue to the final message
break
i += 1
if message.message is None or len(message.message) == 0:
continue
message_id = message.id
body = message.message
from_id = message.from_id
fwd_from = message.fwd_from
if not(self.ignore_forwarded_messages and fwd_from is not None):
if self.target_entity_id == -1 or from_id == self.target_entity_id:
messages_text.append(body)
# if fwd_from is not None:
# fwd_from = fwd_from.from_id
reply_to_msg_id = message.reply_to_msg_id
date = message.date
return messages_text
``` |
{
"source": "1997jorge/web-python-django",
"score": 2
} |
#### File: website/templatetags/primeira_letra.py
```python
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
@stringfilter
def primeira_letra(value):
"""
Verifica se <value> inicia com <arg>.
:param value: valor do filtro
:return: True se value iniciar com arg, False caso contrário
"""
return list(value)[0]
``` |
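In a template the filter is used as `{{ post.title|primeira_letra }}` after loading the tag library; as a plain function it simply returns the first character (the import path below is assumed from the file header).
```python
from website.templatetags.primeira_letra import primeira_letra  # import path assumed

print(primeira_letra("Django"))  # "D"
```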
{
"source": "199806070023/smart-match",
"score": 2
} |
#### File: smart-match/smart_match/__init__.py
```python
from .damerau_levenshtein import *
from .levenshtein import *
from .block_distance import *
from .cosine_similarity import *
from .dice_similarity import *
from .monge_elkan import *
from .jaccard import *
from .generalized_jaccard import *
_method = Levenshtein()
def get_method(name=None, inner_method=None, verbose=False):
if not name:
name = 'ED'
if name == 'ED':
if verbose:
print('mode change to Levenshtein')
return Levenshtein()
elif name == 'DL':
if verbose:
print('mode change to DamerauLevenshtein')
return DamerauLevenshtein()
elif name == 'BD':
if verbose:
print('mode change to BlockDistance')
return BlockDistance()
elif name == 'cos':
if verbose:
print('mode change to CosineSimilarity')
return CosineSimilarity()
elif name == 'dice':
if verbose:
print('mode change to DiceSimilarity')
return DiceSimilarity()
elif name == 'jac':
if verbose:
print('mode change to Jaccard')
return Jaccard()
elif name == 'gjac':
if verbose:
print('mode change to GeneralizedJaccard')
return GeneralizedJaccard()
elif name == 'ME':
if verbose:
print('mode change to MongeElkan')
return MongeElkan(inner_method)
else:
raise NotImplementedError
def use(name=None, inner_method=None, verbose=False):
global _method
_method = get_method(name, inner_method, verbose)
def similarity(s, t):
global _method
return _method.similarity(s, t)
def dissimilarity(s, t):
global _method
return _method.dissimilarity(s, t)
def distance(s, t):
global _method
return _method.distance(s, t)
``` |
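A short sketch of the module-level API defined above; the numeric results depend on the individual method implementations, which are not shown here, so only the call pattern is illustrated.
```python
import smart_match

# The default method is Levenshtein ('ED').
print(smart_match.similarity('hello', 'hero'))
print(smart_match.distance('hello', 'hero'))

# Switch the active method; verbose=True prints the mode change.
smart_match.use('DL', verbose=True)
print(smart_match.similarity('hello', 'hero'))
```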
{
"source": "1998apoorvmalik/generic-genetic-algorithm",
"score": 3
} |
#### File: 1998apoorvmalik/generic-genetic-algorithm/fifteen_puzzle_problem.py
```python
import copy
import numpy as np
from genetic_algorithm import GeneticAlgorithm
class FifteenPuzzleExample(GeneticAlgorithm):
def __init__(self, board=None, solution_size=80, heuristic="a", display_documentation=True):
self.board = board
self.board_size = 4
self.solution_size = solution_size
self.possible_moves = ["u", "d", "l", "r"]
self.example_board = [[14, 0, 11, 10], [13, 3, 7, 5], [12, 15, 2, 9], [4, 1, 8, 6]]
self.target_solution = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
if heuristic == "a":
self.target_score = 0
if heuristic == "b":
self.target_score = 15
if board == None:
self.board = self.example_board
super(FifteenPuzzleExample, self).__init__(
self.possible_moves, self.solution_size, self.target_score, display_documentation=display_documentation
)
def move_piece(self, move=None, board=None, modify=False, display_board=False):
if move == None:
move = np.random.choice(self.possible_moves, 1, replace=False)[0]
assert move in self.possible_moves, "Invalid move!"
if board == None:
board = self.board
board = copy.deepcopy(board)
r = -1
c = -1
for i in range(len(self.target_solution)):
for j in range(len(self.target_solution)):
if board[i][j] == 0:
r, c = i, j
if move == "u":
new_r = r
new_c = c
if r - 1 >= 0:
new_r = r - 1
elif move == "d":
new_r = r
new_c = c
if r + 1 < len(self.target_solution):
new_r = r + 1
elif move == "l":
new_r = r
new_c = c
if c - 1 >= 0:
new_c = c - 1
else:
new_r = r
new_c = c
if c + 1 < len(self.target_solution):
new_c = c + 1
board[new_r][new_c], board[r][c] = board[r][c], board[new_r][new_c]
if display_board:
self.display(board=board)
if modify:
self.board = board
return board
def manhattan_distance_heuristic(self, board=None):
if board == None:
board = self.board
initial_config = list(np.array(board).flatten())
man_distance = 0
for i, item in enumerate(initial_config):
prev_row, prev_col = int(i / self.board_size), i % self.board_size
goal_row, goal_col = int(item / self.board_size), item % self.board_size
man_distance += abs(prev_row - goal_row) + abs(prev_col - goal_col)
return -man_distance
def correct_position_heuristic(self, board=None):
if board == None:
board = self.board
fitness = 0
for r in range(len(self.target_solution)):
for c in range(len(self.target_solution)):
if board[r][c] == self.target_solution[r][c]:
fitness += 1
return fitness
def compute_fitness(self, solution=None, board=None, heuristic="a", modify=False, display_board=False):
if board == None:
board = self.board
board = copy.deepcopy(board)
if solution != None:
for move in solution:
board = self.move_piece(move, board)
if heuristic == "a":
fitness = self.manhattan_distance_heuristic(board)
if heuristic == "b":
fitness = self.correct_position_heuristic(board)
if display_board:
self.display(board=board)
if modify:
self.board = board
return fitness
def display(self, solution=None, board=None):
if board == None:
board = self.board
if solution != None:
for move in solution:
board = self.move_piece(move, board)
for r in range(len(self.target_solution)):
for c in range(len(self.target_solution)):
                if board[r][c] < 10:
                    print(board[r][c], end="   ")  # extra space keeps single-digit tiles aligned
                else:
                    print(board[r][c], end="  ")
print("\n")
print("Fitness: {}\n\n".format(self.compute_fitness(board=board)))
```
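A hypothetical driver for the class above; the GA hyperparameters are illustrative, and since the example board is heavily scrambled, convergence is not guaranteed within a reasonable number of generations.
```python
from fifteen_puzzle_problem import FifteenPuzzleExample  # import path assumed

puzzle = FifteenPuzzleExample(display_documentation=False)
puzzle.display()  # show the starting example board and its fitness

final_population, best_solution = puzzle.run_simulation(
    population_size=20,
    stats_frequency=50,
    selection_type="a",
    crossover_type_1="b",
    mutation_type="e",
    mutation_type_e_prob=0.05,
)

# best_solution is a list of 'u'/'d'/'l'/'r' moves; display() can replay it on the board.
puzzle.display(solution=best_solution)
```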
#### File: 1998apoorvmalik/generic-genetic-algorithm/genetic_algorithm.py
```python
import random
from abc import ABC, abstractmethod
import numpy as np
class GeneticAlgorithm(ABC):
def __init__(
self, gene_pool, genotype_length, target_max_fitness, target_avg_fitness=None, display_documentation=True
):
self.gene_pool = gene_pool
self.gene_pool_length = len(gene_pool)
self.genotype_length = genotype_length
self.target_max_fitness = target_max_fitness
self.target_avg_fitness = target_avg_fitness
self.selection_message_display = True
self.crossover_message_display = True
self.mutation_message_display = True
if display_documentation:
self.show_documentation()
@abstractmethod
    def compute_fitness(self, genotype):
        """Override this method in your subclass."""
        pass
@abstractmethod
    def display(self, genotype):
        """Override this method in your subclass."""
        pass
def run_simulation(
self,
population_size=10,
stats_frequency=100,
selection_type="a",
crossover_type_1="a",
crossover_type_2="b",
mutation_type="a",
mutation_type_e_prob=0.02,
):
self.selection_message_display = True
self.crossover_message_display = True
self.mutation_message_display = True
print("Target Fitness = {}".format(self.target_max_fitness))
print("Simulation started with following options:\n")
population = self.generate_population(population_size)
population_fitness = [self.compute_fitness(genotype) for genotype in population]
max_population_fitness = max(population_fitness)
avg_population_fitness = np.mean(population_fitness)
generation = 0
while True:
# Selection
parents = []
for _ in range(int(population_size / 2)):
parents.append(self.selection(population, type=selection_type))
# Crossover
children = []
for parent in parents:
offsprings = self.crossover(parent[0], parent[1], type1=crossover_type_1, type2=crossover_type_2)
for offspring in offsprings:
children.append(offspring)
# Mutation
mutated_children = []
for child in children:
mutated_children.append(self.mutation(child, prob=mutation_type_e_prob, type=mutation_type))
generation += 1
population = mutated_children
population_fitness = [self.compute_fitness(genotype) for genotype in population]
max_population_fitness = max(population_fitness)
avg_population_fitness = np.mean(population_fitness)
max_fit_index = np.argmax(population_fitness)
if generation == 1:
print(
"Generation : {}, Population size = {}, Max population fitness = {}, Avg. population fitness = {}".format(
generation, population_size, max_population_fitness, avg_population_fitness
)
)
if generation % stats_frequency == 0:
print(
"Generation : {}, Population size = {}, Max population fitness = {}, Avg. population fitness = {}".format(
generation, population_size, max_population_fitness, avg_population_fitness
)
)
if max_population_fitness >= self.target_max_fitness and (
self.target_avg_fitness is None or avg_population_fitness >= self.target_avg_fitness
):
break
print(
"\nSolution found after {} generations, Max population fitness = {}, Avg. population fitness = {}\n".format(
generation, max_population_fitness, avg_population_fitness
)
)
self.display(population[max_fit_index])
return population, population[max_fit_index]
def show_documentation(self):
print("\t\t\t\t\tGENETIC ALGORITHM\n\n")
print("You don't need to execute any method other than self.run_simulation(), (p.s: try it at your own risk).")
print("The following Genetic Algorithm implementation provides multiple options for the selection, crossover")
print("and mutation steps, below is the definition for various arguments that you can use for these methods.")
print("It may be a better practice to pass these arguments in the form of a dictionary, for eg:")
print(
"self.run_simulation(**kwargs). It is not compulsory to provide these arguments at all, as default values are"
)
print("already provided for them.\n\n")
print("Argument : population_size : Specifies population size for each generation.\n")
print("Argument : stats_frequency : Specifies how frequently should the simulation statistics")
print("be displayed.\n\n")
print("Selection methods, and corresponding arguments ->\n")
print("Argument : selection_type = 'a' : Returns the most fittest and second most fittest genotype.\n")
print("Argument : selection_type = 'b' : Returns 2 genotypes based on the probabilities derived from fitness")
print("where probability[i] = i/sum(fitness), where i is a single genotype.\n\n")
print("Crossover methods, and corresponding arguments ->\n")
print(
"Argument : crossover_type_1 = 'a' : Single Point Crossover : A crossover point on the parent organism string"
)
print(
"is selected. All data beyond that point in the organism string is swapped between the two parent organisms."
)
print("Strings are characterized by Positional Bias.\n")
print(
"Argument : crossover_type_1 = 'b' : Two Point Crossover : This is a specific case of a N-point Crossover"
)
print(
"technique. Two random points are chosen on the individual chromosomes (strings) and the genetic material"
)
print("is exchanged at these points.\n")
print("Argument : crossover_type_1 = 'c' : Uniform Crossover : Each gene is selected randomly from one of")
print("the corresponding genes of the parent chromosomes.\n")
print(
"Argument : crossover_type_2 = 'a' : This produces two offsprings by alternating the selected crossover step"
)
print("for each one of them.\n")
print(
"Argument : crossover_type_2 = 'b' : This produces two offsprings by alternating the selected crossover step"
)
print("with some probability for each one of them.\n\n")
print("Mutation methods, and corresponding arguments ->\n")
print("Argument : mutation_type = 'a' : Random Resetting : In this, a random value from the set of")
print("permissible values is assigned to a randomly chosen gene.\n")
print("Argument : mutation_type = 'b' : Swap Mutation : we select two positions on the chromosome at random,")
print("and interchange the values. This is common in permutation based encodings.\n")
print("Argument : mutation_type = 'c' : Scramble Mutation: from the entire chromosome, a subset of genes is")
print("chosen and their values are scrambled or shuffled randomly.\n")
print(
"Argument : mutation_type = 'd' : Inversion Mutation : We select a subset of genes like in scramble mutation,"
)
print("but instead of shuffling the subset, we merely invert the entire string in the subset.\n")
print("Argument : mutation_type = 'e' : Probability Random Resetting : In this, a random value from the set of")
print(
"permissible values is assigned to all the genes in a genotype with probability 1/(length of genotype) or"
)
print("some explicitly set probability.\n")
print("Argument : mutation_type_e_prob : This is only required if mutation_type = 'e' is selected, and if you")
print("want to give some explicit probability for this method. The probability values should be in between")
print("0 and 1.\n")
def generate_population(self, size=2):
population = []
for _ in range(size):
genotype = []
for _ in range(self.genotype_length):
genotype.append(np.random.choice(self.gene_pool, size=1, replace=False)[0])
population.append(genotype)
return population
def selection(self, population, type="a"):
assert type == "a" or type == "b", "Invalid 'type' argument in selection method."
fitness = []
for genotype in population:
fitness.append(self.compute_fitness(genotype))
"""
Type 'a' : Returns the most fittest and second most fittest genotype.
"""
if type == "a":
if self.selection_message_display:
print("1)\tType 'a' Selection method")
self.selection_message_display = False
            # argsort avoids the index shift caused by popping from the fitness list
            sorted_indices = np.argsort(fitness)
            return [population[sorted_indices[-1]], population[sorted_indices[-2]]]
"""
Type 'b' : Returns 2 genotypes based on the probabilities derived from fitness where probability[i] = i/sum(fitness),
where i is a single genotype.
"""
if type == "b":
if self.selection_message_display:
print("1)\tType 'b' Selection method")
self.selection_message_display = False
probs = list()
temp_sum = sum(fitness)
for i in fitness:
probs.append(i / temp_sum)
index = np.random.choice(len(population), size=2, replace=True, p=probs)
return [population[index[0]], population[index[1]]]
def single_point_crossover(self, first_genotype, second_genotype, crossover_index, type="a"):
assert type == "a" or type == "b", "Invalid 'type' argument in single_point_crossover."
offsprings = []
if type == "a":
offsprings.append(first_genotype[:crossover_index] + second_genotype[crossover_index:])
offsprings.append(second_genotype[:crossover_index] + first_genotype[crossover_index:])
if type == "b":
for _ in range(2):
if random.randint(1, 2) == 1:
offsprings.append(first_genotype[:crossover_index] + second_genotype[crossover_index:])
else:
offsprings.append(second_genotype[:crossover_index] + first_genotype[crossover_index:])
return offsprings
def two_point_crossover(
self, first_genotype, second_genotype, first_crossover_index, second_crossover_index, type="a"
):
assert type == "a" or type == "b", "Invalid 'type' argument in two_point_crossover."
offsprings = []
if type == "a":
offsprings.append(
first_genotype[:first_crossover_index]
+ second_genotype[first_crossover_index:second_crossover_index]
+ first_genotype[second_crossover_index:]
)
offsprings.append(
second_genotype[:first_crossover_index]
+ first_genotype[first_crossover_index:second_crossover_index]
+ second_genotype[second_crossover_index:]
)
if type == "b":
for _ in range(2):
if random.randint(1, 2) == 1:
offsprings.append(
first_genotype[:first_crossover_index]
+ second_genotype[first_crossover_index:second_crossover_index]
+ first_genotype[second_crossover_index:]
)
else:
offsprings.append(
second_genotype[:first_crossover_index]
+ first_genotype[first_crossover_index:second_crossover_index]
+ second_genotype[second_crossover_index:]
)
return offsprings
def uniform_crossover(self, first_genotype, second_genotype):
offsprings = []
for _ in range(2):
genotype = []
for i in range(self.genotype_length):
if random.randint(1, 2) == 1:
genotype.append(first_genotype[i])
else:
genotype.append(second_genotype[i])
offsprings.append(genotype)
return offsprings
def crossover(
self,
first_genotype,
second_genotype,
first_crossover_index=None,
second_crossover_index=None,
min_crossover_index=None,
max_crossover_index=None,
type1="a",
type2="a",
):
assert (type1 == "a" or type1 == "b" or type1 == "c") and (
type2 == "a" or type2 == "b"
), "Invalid 'type' argument in crossover method."
if min_crossover_index == None:
min_crossover_index = 0
if max_crossover_index == None:
max_crossover_index = self.genotype_length - 1
# Check if everything is valid.
assert len(first_genotype) == len(
second_genotype
), "The length of the two genotypes must be equal for the crossover to happen."
assert (
min_crossover_index >= 0 and max_crossover_index <= self.genotype_length - 1
), "The minmin_crossover_index >= 0 and max_crossover_index <= self.genotype_length"
if first_crossover_index != None:
assert (
first_crossover_index >= min_crossover_index and first_crossover_index <= max_crossover_index
), "Invalid first crossover index."
else:
first_crossover_index = random.randint(min_crossover_index, max_crossover_index)
if second_crossover_index != None:
assert (
second_crossover_index >= min_crossover_index
and second_crossover_index <= max_crossover_index
and second_crossover_index > first_crossover_index
), "Invalid second crossover index."
else:
second_crossover_index = random.randint(first_crossover_index, max_crossover_index)
"""
Type 'a' => Single Point Crossover : A crossover point on the parent organism string is selected.
All data beyond that point in the organism string is swapped between the two parent organisms.
Strings are characterized by Positional Bias.
"""
if type1 == "a":
if self.crossover_message_display:
print("2)\tSingle Point Crossover method")
self.crossover_message_display = False
return self.single_point_crossover(first_genotype, second_genotype, first_crossover_index, type2)
"""
Type 'b' => Two Point Crossover : This is a specific case of a N-point Crossover technique.
Two random points are chosen on the individual chromosomes (strings) and the genetic material
is exchanged at these points.
"""
if type1 == "b":
if self.crossover_message_display:
print("2)\tTwo Point Crossover method")
self.crossover_message_display = False
return self.two_point_crossover(
first_genotype, second_genotype, first_crossover_index, second_crossover_index, type2
)
"""
Type 'c' => Uniform Crossover : Each gene is selected randomly from one of the corresponding genes of the
parent chromosomes.
"""
if type1 == "c":
if self.crossover_message_display:
print("2)\tUniform Crossover method")
self.crossover_message_display = False
return self.uniform_crossover(first_genotype, second_genotype)
def mutation(self, genotype, prob=None, type="a"):
assert (
type == "a" or type == "b" or type == "c" or type == "d" or type == "e"
), "Invalid 'type' argument in selection."
"""
Type 'a' => Random Resetting : In this, a random value from the set of permissible values is assigned
to a randomly chosen gene.
"""
if type == "a":
if self.mutation_message_display:
print("3)\tRandom Resetting mutation method\n")
self.mutation_message_display = False
mutate_index = random.randrange(0, self.genotype_length)
genotype[mutate_index] = self.gene_pool[random.randrange(0, self.gene_pool_length)]
return genotype
"""
Type 'b' => Swap Mutation : we select two positions on the chromosome at random, and interchange the values.
This is common in permutation based encodings.
"""
if type == "b":
if self.mutation_message_display:
print("3)\tSwap Mutation method\n")
self.mutation_message_display = False
first_mutate_index = random.randrange(0, self.genotype_length - 1)
second_mutate_index = random.randrange(first_mutate_index, self.genotype_length)
genotype[first_mutate_index], genotype[second_mutate_index] = (
genotype[second_mutate_index],
genotype[first_mutate_index],
)
return genotype
"""
Type 'c' => Scramble Mutation: from the entire chromosome, a subset of genes is chosen and
their values are scrambled or shuffled randomly.
"""
if type == "c":
if self.mutation_message_display:
print("3)\tScramble Mutation method\n")
self.mutation_message_display = False
first_mutate_index = random.randrange(0, self.genotype_length - 1)
second_mutate_index = random.randrange(first_mutate_index, self.genotype_length)
            subset = genotype[first_mutate_index : second_mutate_index + 1]
            random.shuffle(subset)  # scramble (shuffle) the chosen subset of genes, as the docstring describes
            genotype[first_mutate_index : second_mutate_index + 1] = subset
            return genotype
"""
Type 'd' => Inversion Mutation : We select a subset of genes like in scramble mutation,
but instead of shuffling the subset, we merely invert the entire string in the subset.
"""
if type == "d":
if self.mutation_message_display:
print("3)\tInversion Mutation method\n")
self.mutation_message_display = False
first_mutate_index = random.randrange(0, self.genotype_length - 1)
second_mutate_index = random.randrange(first_mutate_index, self.genotype_length)
temp = genotype[first_mutate_index : second_mutate_index + 1]
return genotype[0:first_mutate_index] + temp[::-1] + genotype[second_mutate_index + 1 :]
"""
Type 'e' => Probability Random Resetting : In this, a random value from the set of permissible values is assigned
to all the genes in a genotype with probability 1/(length of genotype) or some explicitly set probability.
"""
if type == "e":
if prob == None:
prob = 1 / self.genotype_length
if self.mutation_message_display:
print("3)\tProbability Random Resetting Mutation method, with mutation probability = {}\n".format(prob))
self.mutation_message_display = False
prob = prob * 100
for mutate_index in range(self.genotype_length):
if random.randint(1, 100) <= prob:
genotype[mutate_index] = self.gene_pool[random.randrange(0, self.gene_pool_length)]
return genotype
``` |
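As a minimal, self-contained illustration of the two abstract hooks (compute_fitness and display), here is a toy subclass that evolves a fixed target string; it is a sketch, not part of the repository, and convergence speed depends on the chosen operators.
```python
import string

from genetic_algorithm import GeneticAlgorithm


class StringMatchExample(GeneticAlgorithm):
    """Toy GA that evolves the characters of a fixed target string."""

    def __init__(self, target="hello ga"):
        self.target = target
        gene_pool = list(string.ascii_lowercase + " ")
        super().__init__(
            gene_pool=gene_pool,
            genotype_length=len(target),
            target_max_fitness=len(target),
            display_documentation=False,
        )

    def compute_fitness(self, genotype):
        # fitness = number of positions that already match the target string
        return sum(1 for a, b in zip(genotype, self.target) if a == b)

    def display(self, genotype):
        print("".join(genotype), "fitness:", self.compute_fitness(genotype))


if __name__ == "__main__":
    StringMatchExample().run_simulation(
        population_size=30, mutation_type="e", mutation_type_e_prob=0.05
    )
```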
{
"source": "1998apoorvmalik/teachable_image_classifier",
"score": 3
} |
#### File: teachable_image_classifier/backend/app.py
```python
from flask import Flask
from flask_socketio import SocketIO, emit
from object_classification.dl_classification_model import DeepLearningModel
app = Flask(__name__)
app.config["SECRET_KEY"] = "secret!"
socketio = SocketIO(app)
model = None
@socketio.on("predict_object_classification_model")
def predict_object_classification_model(data):
if model is None:
print("[Error] No model trained.")
emit("error", "Train a model first")
return
img_path = str(data["img_path"])
if img_path is None:
print("[Error] Invalid image/path.")
return
prob, label = model.predict(img_path)
emit("predict_object_classification_model", label)
print("[INFO] Prediction Done, Details:", prob, label)
@socketio.on("train_object_classification_model")
def train_object_classification_model(data):
def log(val):
emit("object_classification_train_log", val)
def handle_error(error):
emit("error", error)
try:
global model
object_classes = data["object_classes"]
image_paths = []
labels = []
for object_class in object_classes:
image_paths += object_class["sample_paths"]
labels += [str(object_class["name"])] * len(object_class["sample_paths"])
model = DeepLearningModel(
data=image_paths,
labels=labels,
repeat_count=int(data["aug_batch_size"]),
batch_size=int(data["train_batch_size"]),
)
model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.fit(epochs=int(data["epochs"]))
except Exception as exception:
handle_error(str(exception))
return
print("Training Done!")
log("[INFO] Model training successfully completed.")
if __name__ == "__main__":
socketio.run(app, debug=True)
``` |
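A hypothetical client-side sketch for exercising the two Socket.IO events above with the python-socketio client; the package choice, file paths and hyperparameter values are all assumptions, since the real project presumably drives these events from its own frontend.
```python
import socketio

sio = socketio.Client()


@sio.on("object_classification_train_log")
def on_train_log(message):
    print("train log:", message)


@sio.on("predict_object_classification_model")
def on_prediction(label):
    print("predicted label:", label)


@sio.on("error")
def on_error(message):
    print("server error:", message)


sio.connect("http://localhost:5000")

# Train on two hypothetical classes; the sample paths and numbers are placeholders.
sio.emit("train_object_classification_model", {
    "object_classes": [
        {"name": "cat", "sample_paths": ["data/cat_1.jpg", "data/cat_2.jpg"]},
        {"name": "dog", "sample_paths": ["data/dog_1.jpg", "data/dog_2.jpg"]},
    ],
    "aug_batch_size": 8,
    "train_batch_size": 4,
    "epochs": 3,
})

# Once training has finished, ask the model for a prediction.
sio.emit("predict_object_classification_model", {"img_path": "data/cat_3.jpg"})
sio.wait()
```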
{
"source": "1998krzysiek/chatbot",
"score": 2
} |
#### File: uploads/core/views.py
```python
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
def home(request):
return render(request, 'home.html')
def bosak(request):
return render(request, 'bosak.html')
def biedron(request):
return render(request, 'biedron.html')
def duda(request):
return render(request, 'duda.html')
def holownia(request):
return render(request, 'holownia.html')
``` |
{
"source": "1998x-stack/Mava",
"score": 2
} |
#### File: adders/reverb/utils.py
```python
from typing import Iterable, Optional
import tensorflow as tf
import tree
from acme import specs, types
from acme.adders.reverb import utils as acme_utils
from acme.utils import tree_utils
from mava.adders.reverb import base
def final_step_like(
step: base.Step, next_observations: types.NestedArray, next_extras: dict = None
) -> base.Step:
"""Return a list of steps with the final step zero-filled."""
# Make zero-filled components so we can fill out the last step.
zero_action, zero_reward, zero_discount = tree.map_structure(
acme_utils.zeros_like, (step.actions, step.rewards, step.discounts)
)
return base.Step(
observations=next_observations,
actions=zero_action,
rewards=zero_reward,
discounts=zero_discount,
start_of_episode=False,
extras=next_extras
if next_extras
else tree.map_structure(acme_utils.zeros_like, step.extras),
)
def trajectory_signature(
environment_spec: specs.EnvironmentSpec,
sequence_length: Optional[int] = None,
extras_spec: types.NestedSpec = (),
) -> tf.TypeSpec:
"""This is a helper method for generating signatures for Reverb tables.
Signatures are useful for validating data types and shapes, see Reverb's
documentation for details on how they are used.
Args:
environment_spec: A `specs.EnvironmentSpec` whose fields are nested
structures with leaf nodes that have `.shape` and `.dtype` attributes.
This should come from the environment that will be used to generate
the data inserted into the Reverb table.
extras_spec: A nested structure with leaf nodes that have `.shape` and
`.dtype` attributes. The structure (and shapes/dtypes) of this must
be the same as the `extras` passed into `ReverbAdder.add`.
sequence_length: An optional integer representing the expected length of
sequences that will be added to replay.
Returns:
A `Trajectory` whose leaf nodes are `tf.TensorSpec` objects.
"""
def add_time_dim(paths: Iterable[str], spec: tf.TensorSpec) -> None:
return tf.TensorSpec(
shape=(sequence_length, *spec.shape),
dtype=spec.dtype,
name="/".join(str(p) for p in paths),
)
agent_specs = environment_spec.get_agent_specs()
agents = environment_spec.get_agent_ids()
env_extras_spec = environment_spec.get_extra_specs()
extras_spec.update(env_extras_spec)
obs_specs = {}
act_specs = {}
reward_specs = {}
step_discount_specs = {}
for agent in agents:
rewards_spec, step_discounts_spec = tree_utils.broadcast_structures(
agent_specs[agent].rewards, agent_specs[agent].discounts
)
obs_specs[agent] = agent_specs[agent].observations
act_specs[agent] = agent_specs[agent].actions
reward_specs[agent] = rewards_spec
step_discount_specs[agent] = step_discounts_spec
# Add a time dimension to the specs
(
obs_specs,
act_specs,
reward_specs,
step_discount_specs,
soe_spec,
extras_spec,
) = tree.map_structure_with_path(
add_time_dim,
(
obs_specs,
act_specs,
reward_specs,
step_discount_specs,
specs.Array(shape=(), dtype=bool),
extras_spec,
),
)
spec_step = base.Trajectory(
observations=obs_specs,
actions=act_specs,
rewards=reward_specs,
discounts=step_discount_specs,
start_of_episode=soe_spec,
extras=extras_spec,
)
return spec_step
```
#### File: systems/jax/config.py
```python
from dataclasses import is_dataclass
from types import SimpleNamespace
from typing import Any, Dict, List
from mava.utils.config_utils import flatten_dict
class Config:
"""Config handler for Jax-based Mava systems."""
def __init__(self) -> None:
"""Initialise config"""
self._config: Dict = {}
self._current_params: List = []
self._built = False
def add(self, **kwargs: Any) -> None:
"""Add a component config dataclass.
Raises:
Exception: if a config for an identically named component already exists
Exception: if a config shares a parameter name with another config
Exception: if a config is not a dataclass object
"""
if not self._built:
for name, dataclass in kwargs.items():
if is_dataclass(dataclass):
if name in list(self._config.keys()):
raise Exception(
"The given component config is already part of the current \
system. Perhaps try updating the component instead using \
.update() in the system builder."
)
else:
new_param_names = list(dataclass.__dict__.keys())
if set(self._current_params) & set(new_param_names):
raise Exception(
"Component configs share a common parameter name. \
This is not allowed, please ensure config \
parameter names are unique."
)
else:
self._current_params.extend(new_param_names)
self._config[name] = dataclass
else:
raise Exception("Component configs must be a dataclass.")
else:
raise Exception(
"Component configs cannot be added to an already built config."
)
def update(self, **kwargs: Any) -> None:
"""Update a component config dataclass.
Raises:
Exception: if a config shares a parameter name with another config
Exception: if a config is not already part of the system
Exception: if a config is not a dataclass object
"""
if not self._built:
for name, dataclass in kwargs.items():
if is_dataclass(dataclass):
if name in list(self._config.keys()):
new_param_names = list(dataclass.__dict__.keys())
if set(self._current_params) & set(new_param_names):
raise Exception(
"Component configs share a common parameter name. \
This is not allowed, please ensure config \
parameter names are unique."
)
else:
self._current_params.extend(new_param_names)
self._config[name] = dataclass
else:
raise Exception(
"The given component config is not part of the current \
system. Perhaps try adding the component using .add() \
in the system builder."
)
else:
raise Exception("Component configs must be a dataclass.")
else:
raise Exception(
"Component configs cannot be updated if config has already been built."
)
def build(self) -> None:
"""Build the config file, i.e. unwrap dataclass nested dictionaries"""
if not self._built:
config_unwrapped: Dict = {}
for param in self._config.values():
config_unwrapped.update(flatten_dict(param.__dict__))
self._built_config = config_unwrapped
self._built = True
else:
raise Exception("Config has already been built, this can only happen once.")
def set_parameters(self, **kwargs: Any) -> None:
"""Set a specific hyperparameter of a built config.
Raises:
Exception: if a set is attempted on a config not yet built.
Exception: if a set is attempted for a hyperparameter that is not part \
of the built config.
"""
if not self._built:
raise Exception(
"Config must first be built using .build() before hyperparameters \
can be set to different values using .set()."
)
else:
for name, param_value in kwargs.items():
if name in list(self._built_config.keys()):
self._built_config[name] = param_value
else:
raise Exception(
"The given parameter is not part of the current system. \
This should have been added first via a component .add() \
during system building."
)
def get(self) -> SimpleNamespace:
"""Get built config for feeding to a Mava system.
Raises:
Exception: if trying to get without having first built the config
Returns:
built config
"""
if self._built:
return SimpleNamespace(**self._built_config)
else:
raise Exception(
"The config must first be built using .build() before calling .get()."
)
```
#### File: systems/jax/config_test.py
```python
from dataclasses import dataclass
import pytest
from mava.systems.jax import Config
@dataclass
class ComponentConfig:
name: str
setting: int
@dataclass
class HyperparameterConfig:
param_0: float
param_1: float
@dataclass
class SameParameterNameConfig:
param_0: int
param_2: str
@pytest.fixture
def dummy_component_config() -> ComponentConfig:
"""Dummy config dataclass for a component.
Returns:
config dataclass
"""
return ComponentConfig(name="component", setting=5)
@pytest.fixture
def dummy_hyperparameter_config() -> HyperparameterConfig:
"""Dummy config dataclass for component hyperparameters.
Returns:
config dataclass
"""
return HyperparameterConfig(param_0=2.7, param_1=3.8)
@pytest.fixture
def config() -> Config:
"""Config instance.
Returns:
instantiation of a Mava Config class
"""
return Config()
def test_add_single_config(config: Config, dummy_component_config: type) -> None:
"""Test adding a single config.
Args:
config : Mava config
dummy_component_config : component config dataclass
"""
config.add(component=dummy_component_config)
config.build()
conf = config.get()
assert conf.name == "component"
assert conf.setting == 5
def test_add_multiple_configs(
config: Config, dummy_component_config: type, dummy_hyperparameter_config: type
) -> None:
"""Test adding multiple configs at the same time.
Args:
config : Mava config
dummy_component_config : component config dataclass
dummy_hyperparameter_config : component config dataclass of hyperparameters
"""
config.add(
component=dummy_component_config, hyperparameter=dummy_hyperparameter_config
)
config.build()
conf = config.get()
assert conf.name == "component"
assert conf.setting == 5
assert conf.param_0 == 2.7
assert conf.param_1 == 3.8
def test_add_configs_twice(
config: Config, dummy_component_config: type, dummy_hyperparameter_config: type
) -> None:
"""Test add two configs, one after the other.
Args:
config : Mava config
dummy_component_config : component config dataclass
dummy_hyperparameter_config : component config dataclass of hyperparameters
"""
config.add(component=dummy_component_config)
config.add(hyperparameter=dummy_hyperparameter_config)
config.build()
conf = config.get()
assert conf.name == "component"
assert conf.setting == 5
assert conf.param_0 == 2.7
assert conf.param_1 == 3.8
def test_update_config(
config: Config, dummy_component_config: type, dummy_hyperparameter_config: type
) -> None:
    Test updating an existing component config with a new dataclass.
Args:
config : Mava config
dummy_component_config : component config dataclass
dummy_hyperparameter_config : component config dataclass of hyperparameters
"""
config.add(component=dummy_component_config)
config.update(component=dummy_hyperparameter_config)
config.build()
conf = config.get()
assert conf.param_0 == 2.7
assert conf.param_1 == 3.8
    assert not hasattr(conf, "name")
    assert not hasattr(conf, "setting")
def test_set_existing_parameter_on_the_fly(
config: Config, dummy_component_config: type
) -> None:
"""Test updating a hyperparameter on the fly after the config has been built.
Args:
config : Mava config
dummy_component_config : component config dataclass
"""
# add component dataclasses and build config
config.add(component=dummy_component_config)
config.build()
# set config parameters on the fly
config.set_parameters(name="new_component_name")
conf = config.get()
assert conf.name == "new_component_name"
assert conf.setting == 5
def test_set_before_build_exception(
config: Config, dummy_component_config: type
) -> None:
"""Test that exception is thrown if it is attempted to set a hyperparameter \
before the config has been built.
Args:
config : Mava config
dummy_component_config : component config dataclass
"""
with pytest.raises(Exception):
# add component dataclasses and build config
config.add(component=dummy_component_config)
# Try setting parameters without having built first
config.set_parameters(name="new_component_name")
def test_get_before_build_exception(
config: Config, dummy_component_config: type
) -> None:
"""Test that exception is thrown if it is attempted to call .get() \
before the config has been built.
Args:
config : Mava config
dummy_component_config : component config dataclass
"""
with pytest.raises(Exception):
# add component dataclasses and build config
config.add(component=dummy_component_config)
# Try getting without having built first
config.get()
def test_parameter_setting_that_does_not_exist_exception(
config: Config, dummy_component_config: type
) -> None:
"""Test that exception is thrown if it is attempted to set a hyperparameter \
that does not exist.
Args:
config : Mava config
dummy_component_config : component config dataclass
"""
with pytest.raises(Exception):
# add component dataclasses and build config
config.add(component=dummy_component_config)
config.build()
# Try setting a parameter that does not exist
config.set_parameters(unknown_param="new_value")
def test_accidental_parameter_override_exception(
config: Config, dummy_hyperparameter_config: type
) -> None:
"""Test that exception is thrown when two component config dataclasses share the \
same name for a specific hyperparameter.
Args:
config : Mava config
dummy_hyperparameter_config : component config dataclass of hyperparameters
"""
with pytest.raises(Exception):
# add component dataclasses and build config
config.add(hyperparameter=dummy_hyperparameter_config)
# add new component dataclass with a parameter of the same name
# as an already existing component parameter name
other_hyperparameter_config = SameParameterNameConfig(param_0=2, param_2="param")
config.add(other_hyperparameter=other_hyperparameter_config)
```
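The last test constructs `SameParameterNameConfig(param_0=2, param_2="param")`, a fixture dataclass defined elsewhere in the test module. A minimal sketch consistent with that call (the field types are assumptions) would be:
```python
from dataclasses import dataclass

@dataclass
class SameParameterNameConfig:
    # param_0 deliberately collides with the name used in the hyperparameter config,
    # which is what the override-exception test relies on.
    param_0: int
    param_2: str
```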
#### File: tf/madqn/training.py
```python
import copy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import mava
from mava import types as mava_types
from mava.systems.tf.variable_utils import VariableClient
from mava.utils import training_utils as train_utils
from mava.utils.sort_utils import sort_str_num
train_utils.set_growing_gpu_memory()
class MADQNTrainer(mava.Trainer):
"""MADQN trainer.
This is the trainer component of a MADQN system, i.e. it takes a dataset as input
and implements update functionality to learn from this dataset.
"""
def __init__(
self,
agents: List[str],
agent_types: List[str],
value_networks: Dict[str, snt.Module],
target_value_networks: Dict[str, snt.Module],
optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]],
discount: float,
target_averaging: bool,
target_update_period: int,
target_update_rate: float,
dataset: tf.data.Dataset,
observation_networks: Dict[str, snt.Module],
target_observation_networks: Dict[str, snt.Module],
variable_client: VariableClient,
counts: Dict[str, Any],
agent_net_keys: Dict[str, str],
max_gradient_norm: float = None,
logger: loggers.Logger = None,
learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
):
"""Initialise MADQN trainer.
Args:
agents: agent ids, e.g. "agent_0".
agent_types: agent types, e.g. "speaker" or "listener".
value_networks: value networks for each agents in
the system.
target_value_networks: target value networks.
optimizer: optimizer(s) for updating policy networks.
discount: discount factor for TD updates.
target_averaging: whether to use polyak averaging for target network
updates.
target_update_period: number of steps before target networks are
updated.
target_update_rate: update rate when using averaging.
dataset: training dataset.
observation_networks: network for feature
extraction from raw observation.
target_observation_networks: target observation
network.
variable_client: The client used to manage the variables.
counts: step counter object.
agent_net_keys: specifies what network each agent uses.
max_gradient_norm: maximum allowed norm for gradients
before clipping is applied.
logger: logger object for logging trainer
statistics.
learning_rate_scheduler_fn: dict with two functions (one for the policy and
one for the critic optimizer), that takes in a trainer step t and
returns the current learning rate.
"""
self._agents = agents
self._agent_types = agent_types
self._agent_net_keys = agent_net_keys
self._variable_client = variable_client
self._learning_rate_scheduler_fn = learning_rate_scheduler_fn
# Setup counts
self._counts = counts
# Store online and target networks.
self._value_networks = value_networks
self._target_value_networks = target_value_networks
# Ensure obs and target networks are sonnet modules
self._observation_networks = {
k: tf2_utils.to_sonnet_module(v) for k, v in observation_networks.items()
}
self._target_observation_networks = {
k: tf2_utils.to_sonnet_module(v)
for k, v in target_observation_networks.items()
}
# General learner book-keeping and loggers.
self._logger = logger or loggers.make_default_logger("trainer")
# Other learner parameters.
self._discount = discount
# Set up gradient clipping.
if max_gradient_norm is not None:
self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
else: # A very large number. Infinity results in NaNs.
self._max_gradient_norm = tf.convert_to_tensor(1e10)
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_averaging = target_averaging
self._target_update_period = target_update_period
self._target_update_rate = target_update_rate
# Create an iterator to go through the dataset.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Dictionary with unique network keys.
self.unique_net_keys = sort_str_num(self._value_networks.keys())
# Get the agents which should be updated and run
self._trainer_agent_list = self._agents
# Create optimizers for different agent types.
if not isinstance(optimizer, dict):
self._optimizers: Dict[str, snt.Optimizer] = {}
for agent in self.unique_net_keys:
self._optimizers[agent] = copy.deepcopy(optimizer)
else:
self._optimizers = optimizer
# Expose the variables.
self._system_network_variables: Dict[str, Dict[str, snt.Module]] = {
"observations": {},
"values": {},
}
for agent_key in self.unique_net_keys:
self._system_network_variables["observations"][
agent_key
] = self._target_observation_networks[agent_key].variables
self._system_network_variables["values"][agent_key] = self._value_networks[
agent_key
].variables
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp: Optional[float] = None
def _update_target_networks(self) -> None:
"""Update the target networks.
Using either target averaging or
by directly copying the weights of the online networks every few steps.
"""
for key in self.unique_net_keys:
# Update target network.
online_variables = (
*self._observation_networks[key].variables,
*self._value_networks[key].variables,
)
target_variables = (
*self._target_observation_networks[key].variables,
*self._target_value_networks[key].variables,
)
if self._target_averaging:
assert 0.0 < self._target_update_rate < 1.0
tau = self._target_update_rate
for src, dest in zip(online_variables, target_variables):
dest.assign(dest * (1.0 - tau) + src * tau)
else:
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
def get_variables(self, names: Sequence[str]) -> Dict[str, Dict[str, np.ndarray]]:
"""Depricated"""
pass
def _transform_observations(
self, obs: Dict[str, mava_types.OLT], next_obs: Dict[str, mava_types.OLT]
) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
"""Transform the observations using the observation networks of each agent.
We assume the observation network is non-recurrent.
Args:
obs: observations at timestep t-1
next_obs: observations at timestep t
Returns:
Transformed observations
"""
o_tm1 = {}
o_t = {}
for agent in self._agents:
agent_key = self._agent_net_keys[agent]
o_tm1[agent] = self._observation_networks[agent_key](obs[agent].observation)
o_t[agent] = self._target_observation_networks[agent_key](
next_obs[agent].observation
)
# This stop_gradient prevents gradients to propagate into the target
# observation network. In addition, since the online policy network is
# evaluated at o_t, this also means the policy loss does not influence
# the observation network training.
o_t[agent] = tree.map_structure(tf.stop_gradient, o_t[agent])
return o_tm1, o_t
@tf.function
def _step(
self,
) -> Dict[str, Dict[str, Any]]:
"""Trainer step.
Returns:
losses
"""
# Draw a batch of data from replay.
sample: reverb.ReplaySample = next(self._iterator)
# Compute loss
self._forward(sample)
# Compute and apply gradients
self._backward()
# Update the target networks
self._update_target_networks()
# Log losses per agent
return train_utils.map_losses_per_agent_value(self.value_losses)
def _forward(self, inputs: reverb.ReplaySample) -> None:
"""Trainer forward pass.
Args:
inputs: input data from the data table (transitions)
"""
# Unpack input data as follows:
# o_tm1 = dictionary of observations one for each agent
# a_tm1 = dictionary of actions taken from obs in o_tm1
# e_tm1 [Optional] = extra data for timestep t-1
# that the agents persist in replay.
# r_t = dictionary of rewards or rewards sequences
# (if using N step transitions) ensuing from actions a_tm1
# d_t = environment discount ensuing from actions a_tm1.
# This discount is applied to future rewards after r_t.
# o_t = dictionary of next observations or next observation sequences
# e_t [Optional] = extra data for timestep t that the agents persist in replay.
trans = mava_types.Transition(*inputs.data)
o_tm1, o_t, a_tm1, r_t, d_t, _, _ = (
trans.observations,
trans.next_observations,
trans.actions,
trans.rewards,
trans.discounts,
trans.extras,
trans.next_extras,
)
self.value_losses = {}
# Do forward passes through the networks and calculate the losses
with tf.GradientTape(persistent=True) as tape:
o_tm1_trans, o_t_trans = self._transform_observations(o_tm1, o_t)
for agent in self._trainer_agent_list:
agent_key = self._agent_net_keys[agent]
# Double Q-learning
q_tm1 = self._value_networks[agent_key](o_tm1_trans[agent])
q_t_value = self._target_value_networks[agent_key](o_t_trans[agent])
q_t_selector = self._value_networks[agent_key](o_t_trans[agent])
# Legal action masking
q_t_selector = tf.where(
tf.cast(o_t[agent].legal_actions, "bool"), q_t_selector, -999999999
)
# pcont
discount = tf.cast(self._discount, dtype=d_t[agent].dtype)
# Value loss.
value_loss, _ = trfl.double_qlearning(
q_tm1,
a_tm1[agent],
r_t[agent],
discount * d_t[agent],
q_t_value,
q_t_selector,
)
self.value_losses[agent] = tf.reduce_mean(value_loss, axis=0)
self.tape = tape
def _backward(self) -> None:
"""Trainer backward pass updating network parameters"""
# Calculate the gradients and update the networks
value_losses = self.value_losses
tape = self.tape
for agent in self._trainer_agent_list:
agent_key = self._agent_net_keys[agent]
# Get trainable variables.
variables = (
self._observation_networks[agent_key].trainable_variables
+ self._value_networks[agent_key].trainable_variables
)
# Compute gradients.
# Note: Warning "WARNING:tensorflow:Calling GradientTape.gradient
# on a persistent tape inside its context is significantly less efficient
# than calling it outside the context." caused by losses.dpg, which calls
# tape.gradient.
gradients = tape.gradient(value_losses[agent], variables)
# Maybe clip gradients.
gradients = tf.clip_by_global_norm(gradients, self._max_gradient_norm)[0]
# Apply gradients.
self._optimizers[agent_key].apply(gradients, variables)
train_utils.safe_del(self, "tape")
def step(self) -> None:
"""Trainer step to update the parameters of the agents in the system"""
raise NotImplementedError("A trainer statistics wrapper should overwrite this.")
def after_trainer_step(self) -> None:
"""Optionally decay lr after every training step."""
if self._learning_rate_scheduler_fn:
self._decay_lr(self._num_steps)
info: Dict[str, Dict[str, float]] = {}
for agent in self._agents:
info[agent] = {}
info[agent]["learning_rate"] = self._optimizers[
self._agent_net_keys[agent]
].learning_rate
if self._logger:
self._logger.write(info)
def _decay_lr(self, trainer_step: int) -> None:
"""Decay lr.
Args:
trainer_step : trainer step time t.
"""
train_utils.decay_lr(
self._learning_rate_scheduler_fn, # type: ignore
self._optimizers,
trainer_step,
)
class MADQNRecurrentTrainer(mava.Trainer):
"""Recurrent MADQN trainer.
This is the trainer component of a recurrent MADQN system, i.e. it takes a dataset
as input and implements update functionality to learn from this dataset.
"""
def __init__(
self,
agents: List[str],
agent_types: List[str],
value_networks: Dict[str, snt.Module],
target_value_networks: Dict[str, snt.Module],
optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]],
discount: float,
target_averaging: bool,
target_update_period: int,
target_update_rate: float,
dataset: tf.data.Dataset,
observation_networks: Dict[str, snt.Module],
target_observation_networks: Dict[str, snt.Module],
variable_client: VariableClient,
counts: Dict[str, Any],
agent_net_keys: Dict[str, str],
max_gradient_norm: float = None,
logger: loggers.Logger = None,
learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
):
"""Initialise Recurrent MADQN trainer
Args:
agents: agent ids, e.g. "agent_0".
agent_types: agent types, e.g. "speaker" or "listener".
value_networks: value networks for each agent in
the system.
target_value_networks: target value networks.
optimizer: optimizer(s) for updating value networks.
discount: discount factor for TD updates.
target_averaging: whether to use polyak averaging for target network
updates.
target_update_period: number of steps before target networks are
updated.
target_update_rate: update rate when using averaging.
dataset: training dataset.
observation_networks: network for feature
extraction from raw observation.
target_observation_networks: target observation
network.
variable_client: The client used to manage the variables.
counts: step counter object.
agent_net_keys: specifies what network each agent uses.
max_gradient_norm: maximum allowed norm for gradients
before clipping is applied.
logger: logger object for logging trainer
statistics.
learning_rate_scheduler_fn: dict with two functions (one for the policy and
one for the critic optimizer), that takes in a trainer step t and
returns the current learning rate.
"""
self._agents = agents
self._agent_type = agent_types
self._agent_net_keys = agent_net_keys
self._variable_client = variable_client
self._learning_rate_scheduler_fn = learning_rate_scheduler_fn
# Setup counts
self._counts = counts
# Store online and target networks.
self._value_networks = value_networks
self._target_value_networks = target_value_networks
# Ensure obs and target networks are sonnet modules
self._observation_networks = {
k: tf2_utils.to_sonnet_module(v) for k, v in observation_networks.items()
}
self._target_observation_networks = {
k: tf2_utils.to_sonnet_module(v)
for k, v in target_observation_networks.items()
}
# General learner book-keeping and loggers.
self._logger = logger or loggers.make_default_logger("trainer")
# Other learner parameters.
self._discount = discount
# Set up gradient clipping.
if max_gradient_norm is not None:
self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
else: # A very large number. Infinity results in NaNs.
self._max_gradient_norm = tf.convert_to_tensor(1e10)
# Necessary to track when to update target networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._target_averaging = target_averaging
self._target_update_period = target_update_period
self._target_update_rate = target_update_rate
# Create an iterator to go through the dataset.
self._iterator = iter(dataset) # pytype: disable=wrong-arg-types
# Dictionary with unique network keys.
self.unique_net_keys = sort_str_num(self._value_networks.keys())
# Get the agents which should be updated and run
self._trainer_agent_list = self._agents
# Create optimizers for different agent types.
if not isinstance(optimizer, dict):
self._optimizers: Dict[str, snt.Optimizer] = {}
for agent in self.unique_net_keys:
self._optimizers[agent] = copy.deepcopy(optimizer)
else:
self._optimizers = optimizer
# Expose the variables.
self._system_network_variables: Dict[str, Dict[str, snt.Module]] = {
"observations": {},
"values": {},
}
for agent_key in self.unique_net_keys:
self._system_network_variables["observations"][
agent_key
] = self._target_observation_networks[agent_key].variables
self._system_network_variables["values"][
agent_key
] = self._target_value_networks[agent_key].variables
# Do not record timestamps until after the first learning step is done.
# This is to avoid including the time it takes for actors to come online and
# fill the replay buffer.
self._timestamp: Optional[float] = None
def step(self) -> None:
"""Trainer step to update the parameters of the agents in the system"""
raise NotImplementedError("A trainer statistics wrapper should overwrite this.")
def _transform_observations(
self, observations: Dict[str, mava_types.OLT]
) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
"""Apply the observation networks to the raw observations from the dataset
We assume that the observation network is non-recurrent.
Args:
observations: raw agent observations
Returns:
obs_trans: transformed agent observation
obs_target_trans: transformed target network observations
"""
# NOTE We are assuming that only the value network
# is recurrent and not the observation network.
obs_trans = {}
obs_target_trans = {}
for agent in self._agents:
agent_key = self._agent_net_keys[agent]
reshaped_obs, dims = train_utils.combine_dim(
observations[agent].observation
)
obs_trans[agent] = train_utils.extract_dim(
self._observation_networks[agent_key](reshaped_obs), dims
)
obs_target_trans[agent] = train_utils.extract_dim(
self._target_observation_networks[agent_key](reshaped_obs),
dims,
)
# This stop_gradient prevents gradients to propagate into
# the target observation network.
obs_target_trans[agent] = tree.map_structure(
tf.stop_gradient, obs_target_trans[agent]
)
return obs_trans, obs_target_trans
def _update_target_networks(self) -> None:
"""Update the target networks.
Using either target averaging or
by directly copying the weights of the online networks every few steps.
"""
for key in self.unique_net_keys:
# Update target network.
online_variables = (
*self._observation_networks[key].variables,
*self._value_networks[key].variables,
)
target_variables = (
*self._target_observation_networks[key].variables,
*self._target_value_networks[key].variables,
)
if self._target_averaging:
assert 0.0 < self._target_update_rate < 1.0
tau = self._target_update_rate
for src, dest in zip(online_variables, target_variables):
dest.assign(dest * (1.0 - tau) + src * tau)
else:
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
def get_variables(self, names: Sequence[str]) -> Dict[str, Dict[str, np.ndarray]]:
"""Depricated"""
pass
@tf.function
def _step(
self,
) -> Dict[str, Dict[str, Any]]:
"""Trainer step.
Returns:
losses
"""
# Draw a batch of data from replay.
sample: reverb.ReplaySample = next(self._iterator)
# Compute loss
self._forward(sample)
# Compute and apply gradients
self._backward()
# Update the target networks
self._update_target_networks()
# Log losses per agent
return train_utils.map_losses_per_agent_value(self.value_losses)
def _forward(self, inputs: reverb.ReplaySample) -> None:
"""Trainer forward pass.
Args:
inputs: input data from the data table (transitions)
"""
# Convert to time major
data = tree.map_structure(
lambda v: tf.expand_dims(v, axis=0) if len(v.shape) <= 1 else v, inputs.data
)
data = tf2_utils.batch_to_sequence(data)
# Note (dries): The unused variable is start_of_episodes.
observations, actions, rewards, discounts, _, extras = (
data.observations,
data.actions,
data.rewards,
data.discounts,
data.start_of_episode,
data.extras,
)
# Get initial state for the LSTM from replay and
# extract the first state in the sequence.
core_state = tree.map_structure(lambda s: s[0, :, :], extras["core_states"])
target_core_state = tree.map_structure(
lambda s: s[0, :, :], extras["core_states"]
)
# TODO (dries): Take out all the data_points that does not need
# to be processed here at the start. Therefore it does not have
# to be done later on and saves processing time.
self.value_losses: Dict[str, tf.Tensor] = {}
# Do forward passes through the networks and calculate the losses
with tf.GradientTape(persistent=True) as tape:
# Note (dries): We are assuming that only the policy network
# is recurrent and not the observation network.
obs_trans, target_obs_trans = self._transform_observations(observations)
for agent in self._trainer_agent_list:
agent_key = self._agent_net_keys[agent]
# Double Q-learning
q, _ = snt.static_unroll(
self._value_networks[agent_key],
obs_trans[agent],
core_state[agent][0],
)
q_tm1 = q[:-1] # Chop off last timestep
q_t_selector = q[1:] # Chop off first timestep
q_t_value, _ = snt.static_unroll(
self._target_value_networks[agent_key],
target_obs_trans[agent],
target_core_state[agent][0],
)
q_t_value = q_t_value[1:] # Chop off first timestep
# Legal action masking
q_t_selector = tf.where(
tf.cast(observations[agent].legal_actions[1:], "bool"),
q_t_selector,
-999999999,
)
# Flatten out time and batch dim
q_tm1, _ = train_utils.combine_dim(q_tm1)
q_t_selector, _ = train_utils.combine_dim(q_t_selector)
q_t_value, _ = train_utils.combine_dim(q_t_value)
a_tm1, _ = train_utils.combine_dim(
actions[agent][:-1] # Chop off last timestep
)
r_t, _ = train_utils.combine_dim(
rewards[agent][:-1] # Chop off last timestep
)
d_t, _ = train_utils.combine_dim(
discounts[agent][:-1] # Chop off last timestep
)
# Cast the additional discount to match
# the environment discount dtype.
discount = tf.cast(self._discount, dtype=discounts[agent].dtype)
# Value loss
value_loss, _ = trfl.double_qlearning(
q_tm1, a_tm1, r_t, discount * d_t, q_t_value, q_t_selector
)
# Zero-padding mask
zero_padding_mask, _ = train_utils.combine_dim(
tf.cast(extras["zero_padding_mask"], dtype=value_loss.dtype)[:-1]
)
masked_loss = value_loss * zero_padding_mask
self.value_losses[agent] = tf.reduce_sum(masked_loss) / tf.reduce_sum(
zero_padding_mask
)
self.tape = tape
def _backward(self) -> None:
"""Trainer backward pass updating network parameters"""
# Calculate the gradients and update the networks
value_losses = self.value_losses
tape = self.tape
for agent in self._trainer_agent_list:
agent_key = self._agent_net_keys[agent]
# Get trainable variables.
variables = (
self._observation_networks[agent_key].trainable_variables
+ self._value_networks[agent_key].trainable_variables
)
# Compute gradients.
gradients = tape.gradient(value_losses[agent], variables)
# Maybe clip gradients.
gradients = tf.clip_by_global_norm(gradients, self._max_gradient_norm)[0]
# Apply gradients.
self._optimizers[agent_key].apply(gradients, variables)
train_utils.safe_del(self, "tape")
def after_trainer_step(self) -> None:
"""Optionally decay lr after every training step."""
if self._learning_rate_scheduler_fn:
self._decay_lr(self._num_steps)
info: Dict[str, Dict[str, float]] = {}
for agent in self._agents:
info[agent] = {}
info[agent]["learning_rate"] = self._optimizers[
self._agent_net_keys[agent]
].learning_rate
if self._logger:
self._logger.write(info)
def _decay_lr(self, trainer_step: int) -> None:
"""Decay lr.
Args:
trainer_step : trainer step time t.
"""
train_utils.decay_lr(
self._learning_rate_scheduler_fn, # type: ignore
self._optimizers,
trainer_step,
)
```
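Both trainers above delegate the TD-error computation to `trfl.double_qlearning`. As a rough, illustrative sketch of the target that call computes (not the trfl implementation itself), assuming a batch of discrete-action Q-values:
```python
import tensorflow as tf

def double_q_td_error(q_tm1, a_tm1, r_t, pcont_t, q_t_value, q_t_selector):
    """Illustrative double Q-learning TD error (assumed shapes, not trfl's code).

    q_tm1:        [B, A] online Q-values at t-1.
    a_tm1:        [B]    integer actions taken at t-1.
    r_t:          [B]    rewards.
    pcont_t:      [B]    discount * environment discount.
    q_t_value:    [B, A] target-network Q-values at t.
    q_t_selector: [B, A] online (legal-action masked) Q-values at t.
    """
    num_actions = tf.shape(q_t_value)[-1]
    # Select the greedy action with the online network ...
    best_a = tf.argmax(q_t_selector, axis=-1)
    # ... but evaluate it with the target network (the "double" part).
    q_best = tf.reduce_sum(q_t_value * tf.one_hot(best_a, num_actions), axis=-1)
    target = tf.stop_gradient(r_t + pcont_t * q_best)
    qa_tm1 = tf.reduce_sum(q_tm1 * tf.one_hot(a_tm1, num_actions), axis=-1)
    return target - qa_tm1
```
The trainers then reduce this per-transition error to a scalar loss (mean for the feedforward trainer, zero-padding-masked mean for the recurrent one) before applying clipped gradients.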
#### File: RoboCup_env/robocup_utils/player_world_model.py
```python
import math
import numpy as np
from mava.utils.environments.RoboCup_env.robocup_utils.game_object import Flag
from mava.utils.environments.RoboCup_env.robocup_utils.util_functions import (
rad_rot_to_xy,
)
true_flag_coords = Flag.FLAG_COORDS
def rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
def rel_to_abs_coords(obj):
obj_x = obj.distance * math.sin(-obj.direction * math.pi / 180)
obj_y = -obj.distance * math.cos(-obj.direction * math.pi / 180)
return obj_x, obj_y
class WorldModel:
"""
Holds and updates the model of the world as known from current and past
data.
"""
# constants for team sides
SIDE_L = "l"
SIDE_R = "r"
class PlayModes:
"""
Acts as a static class containing variables for all valid play modes.
The string values correspond to what the referee calls the game modes.
"""
BEFORE_KICK_OFF = "before_kick_off"
PLAY_ON = "play_on"
TIME_OVER = "time_over"
KICK_OFF_L = "kick_off_l"
KICK_OFF_R = "kick_off_r"
KICK_IN_L = "kick_in_l"
KICK_IN_R = "kick_in_r"
FREE_KICK_L = "free_kick_l"
FREE_KICK_R = "free_kick_r"
CORNER_KICK_L = "corner_kick_l"
CORNER_KICK_R = "corner_kick_r"
GOAL_KICK_L = "goal_kick_l"
GOAL_KICK_R = "goal_kick_r"
DROP_BALL = "drop_ball"
OFFSIDE_L = "offside_l"
OFFSIDE_R = "offside_r"
def __init__(self):
raise NotImplementedError(
"Don't instantiate a PlayModes class,"
" access it statically through WorldModel instead."
)
class RefereeMessages:
"""
Static class containing possible non-mode messages sent by a referee.
"""
# these are referee messages, not play modes
FOUL_L = "foul_l"
FOUL_R = "foul_r"
GOALIE_CATCH_BALL_L = "goalie_catch_ball_l"
GOALIE_CATCH_BALL_R = "goalie_catch_ball_r"
TIME_UP_WITHOUT_A_TEAM = "time_up_without_a_team"
TIME_UP = "time_up"
HALF_TIME = "half_time"
TIME_EXTENDED = "time_extended"
# these are special, as they are always followed by '_' and an int of
# the number of goals scored by that side so far. these won't match
# anything specifically, but goals WILL start with these.
GOAL_L = "goal_l_"
GOAL_R = "goal_r_"
def __init__(self):
raise NotImplementedError(
"Don't instantiate a RefereeMessages class,"
" access it statically through WorldModel instead."
)
def __init__(self, action_handler):
"""
Create the world model with default values and an ActionHandler class it
can use to complete requested actions.
"""
# we use the action handler to complete complex commands
self.ah = action_handler
# these variables store all objects for any particular game step
self.ball = None
self.flags = []
self.goals = []
self.players = []
self.lines = []
# the default position of this player, its home position
self.home_point = (None, None)
# scores for each side
self.score_l = 0
self.score_r = 0
# the name of the agent's team
self.teamname = None
# handle player information, like uniform number and side
self.side = None
self.uniform_number = None
# stores the most recent message heard
self.last_message = None
# the mode the game is currently in (default to not playing yet)
self.play_mode = WorldModel.PlayModes.BEFORE_KICK_OFF
# Obs updated
# self.obs_updated = False
# body state
self.view_width = None
self.view_quality = None
self.stamina = None
self.effort = None
self.speed_amount = None
self.speed_direction = None
self.neck_direction = None
self.new_data = False
# counts of actions taken so far
self.kick_count = None
self.dash_count = None
self.turn_count = None
self.say_count = None
self.turn_neck_count = None
self.catch_count = None
self.move_count = None
self.change_view_count = None
# apparent absolute player coordinates and neck/body directions
self.abs_coords = (None, None)
self.abs_body_dir = None
self.abs_neck_dir = None
# create a new server parameter object for holding all server params
self.server_parameters = ServerParameters()
def __calculate_abs_info(self):
if self.flags is not None:
rel_coords = []
true_coords = []
for flag in self.flags:
if (
flag is not None
and flag.direction is not None
and flag.distance is not None
and flag.flag_id is not None
):
obs_coords = rel_to_abs_coords(flag)
rel_coords.append(obs_coords)
true_coords.append(true_flag_coords[flag.flag_id])
for goal in self.goals:
if (
goal is not None
and goal.direction is not None
and goal.distance is not None
and goal.goal_id is not None
):
obs_coords = rel_to_abs_coords(goal)
rel_coords.append(obs_coords)
true_coords.append(true_flag_coords[goal.goal_id])
if len(true_coords) > 1 and len(rel_coords) > 1:
# Get means
rel_mean = np.mean(rel_coords, axis=0)
true_mean = np.mean(true_coords, axis=0)
mean_off = rel_mean - true_mean
# Get rotation
rel_de_mean = np.array(rel_coords) - rel_mean
true_de_mean = np.array(true_coords) - true_mean
if len(true_de_mean) > 1:
ang_offs = np.arctan2(
true_de_mean[:, 1], true_de_mean[:, 0]
) - np.arctan2(rel_de_mean[:, 1], rel_de_mean[:, 0])
x, y = rad_rot_to_xy(ang_offs)
x_mean = np.mean(x)
y_mean = np.mean(y)
ang_offs = np.arctan2(y_mean, x_mean)
true_agent_loc = rotate(rel_mean, (0, 0), ang_offs) - mean_off
self.abs_coords = true_agent_loc
self.abs_body_dir = (ang_offs / math.pi) * 180
# TODO: Is this correct?
self.abs_neck_dir = self.abs_body_dir + self.neck_direction
def process_new_info(self, ball, flags, goals, players, lines):
"""
Update any internal variables based on the currently available
information. This also calculates information not available directly
from server-reported messages, such as player coordinates.
"""
# update basic information
self.ball = ball
self.flags = flags
self.goals = goals
self.players = players
self.lines = lines
self.__calculate_abs_info()
def is_playon(self):
"""
Tells us whether it's play time
"""
return (
self.play_mode == WorldModel.PlayModes.PLAY_ON
or self.play_mode == WorldModel.PlayModes.KICK_OFF_L
or self.play_mode == WorldModel.PlayModes.KICK_OFF_R
or self.play_mode == WorldModel.PlayModes.KICK_IN_L
or self.play_mode == WorldModel.PlayModes.KICK_IN_R
or self.play_mode == WorldModel.PlayModes.FREE_KICK_L
or self.play_mode == WorldModel.PlayModes.FREE_KICK_R
or self.play_mode == WorldModel.PlayModes.CORNER_KICK_L
or self.play_mode == WorldModel.PlayModes.CORNER_KICK_R
or self.play_mode == WorldModel.PlayModes.GOAL_KICK_L
or self.play_mode == WorldModel.PlayModes.GOAL_KICK_R
or self.play_mode == WorldModel.PlayModes.DROP_BALL
or self.play_mode == WorldModel.PlayModes.OFFSIDE_L
or self.play_mode == WorldModel.PlayModes.OFFSIDE_R
)
def is_before_kick_off(self):
"""
Tells us whether the game is in a pre-kickoff state.
"""
return self.play_mode == WorldModel.PlayModes.BEFORE_KICK_OFF
def is_kick_off_us(self):
"""
Tells us whether it's our turn to kick off.
"""
ko_left = WorldModel.PlayModes.KICK_OFF_L
ko_right = WorldModel.PlayModes.KICK_OFF_R
# return whether we're on the side that's kicking off
return (
self.side == WorldModel.SIDE_L
and self.play_mode == ko_left
or self.side == WorldModel.SIDE_R
and self.play_mode == ko_right
)
def is_dead_ball_them(self):
"""
Returns whether the ball is in the other team's possession and it's a
free kick, corner kick, or kick in.
"""
# shorthand for verbose constants
kil = WorldModel.PlayModes.KICK_IN_L
kir = WorldModel.PlayModes.KICK_IN_R
fkl = WorldModel.PlayModes.FREE_KICK_L
fkr = WorldModel.PlayModes.FREE_KICK_R
ckl = WorldModel.PlayModes.CORNER_KICK_L
ckr = WorldModel.PlayModes.CORNER_KICK_R
# shorthand for whether left team or right team is free to act
pm = self.play_mode
free_left = pm == kil or pm == fkl or pm == ckl
free_right = pm == kir or pm == fkr or pm == ckr
# return whether the opposing side is in a dead ball situation
if self.side == WorldModel.SIDE_L:
return free_right
else:
return free_left
def is_ball_kickable(self):
"""
Tells us whether the ball is in reach of the current player.
"""
# ball must be visible, not behind us, and within the kickable margin
return (
self.ball is not None
and self.ball.distance is not None
and self.ball.distance <= self.server_parameters.kickable_margin
)
def get_ball_speed_max(self):
"""
Returns the maximum speed the ball can be kicked at.
"""
return self.server_parameters.ball_speed_max
def get_stamina(self):
"""
Returns the agent's current stamina amount.
"""
return self.stamina
def get_stamina_max(self):
"""
Returns the maximum amount of stamina a player can have.
"""
return self.server_parameters.stamina_max
def turn_body_to_object(self, obj):
"""
Turns the player's body to face a particular object.
"""
self.ah.turn(obj.direction)
class ServerParameters:
"""A storage container for all the settings of the soccer server."""
def __init__(self):
"""Initialize default parameters for a server."""
self.audio_cut_dist = 50
self.auto_mode = 0
self.back_passes = 1
self.ball_accel_max = 2.7
self.ball_decay = 0.94
self.ball_rand = 0.05
self.ball_size = 0.085
self.ball_speed_max = 2.7
self.ball_stuck_area = 3
self.ball_weight = 0.2
self.catch_ban_cycle = 5
self.catch_probability = 1
self.catchable_area_l = 2
self.catchable_area_w = 1
self.ckick_margin = 1
self.clang_advice_win = 1
self.clang_define_win = 1
self.clang_del_win = 1
self.clang_info_win = 1
self.clang_mess_delay = 50
self.clang_mess_per_cycle = 1
self.clang_meta_win = 1
self.clang_rule_win = 1
self.clang_win_size = 300
self.coach = 0
self.coach_port = 6001
self.coach_w_referee = 0
self.connect_wait = 300
self.control_radius = 2
self.dash_power_rate = 0.006
self.drop_ball_time = 200
self.effort_dec = 0.005
self.effort_dec_thr = 0.3
self.effort_inc = 0.01
self.effort_inc_thr = 0.6
self.effort_init = 1
self.effort_min = 0.6
self.forbid_kick_off_offside = 1
self.free_kick_faults = 1
self.freeform_send_period = 20
self.freeform_wait_period = 600
self.fullstate_l = 0
self.fullstate_r = 0
self.game_log_compression = 0
self.game_log_dated = 1
self.game_log_dir = "./"
self.game_log_fixed = 0
self.game_log_fixed_name = "rcssserver"
self.game_log_version = 3
self.game_logging = 1
self.game_over_wait = 100
self.goal_width = 14.02
self.goalie_max_moves = 2
self.half_time = 300
self.hear_decay = 1
self.hear_inc = 1
self.hear_max = 1
self.inertia_moment = 5
self.keepaway = 0
self.keepaway_length = 20
self.keepaway_log_dated = 1
self.keepaway_log_dir = "./"
self.keepaway_log_fixed = 0
self.keepaway_log_fixed_name = "rcssserver"
self.keepaway_logging = 1
self.keepaway_start = -1
self.keepaway_width = 20
self.kick_off_wait = 100
self.kick_power_rate = 0.027
self.kick_rand = 0
self.kick_rand_factor_l = 1
self.kick_rand_factor_r = 1
self.kickable_margin = 0.7
self.landmark_file = "~/.rcssserver-landmark.xml"
self.log_date_format = "%Y%m%d%H%M-"
self.log_times = 0
self.max_goal_kicks = 3
self.maxmoment = 180
self.maxneckang = 90
self.maxneckmoment = 180
self.maxpower = 100
self.minmoment = -180
self.minneckang = -90
self.minneckmoment = -180
self.minpower = -100
self.nr_extra_halfs = 2
self.nr_normal_halfs = 2
self.offside_active_area_size = 2.5
self.offside_kick_margin = 9.15
self.olcoach_port = 6002
self.old_coach_hear = 0
self.pen_allow_mult_kicks = 1
self.pen_before_setup_wait = 30
self.pen_coach_moves_players = 1
self.pen_dist_x = 42.5
self.pen_max_extra_kicks = 10
self.pen_max_goalie_dist_x = 14
self.pen_nr_kicks = 5
self.pen_random_winner = 0
self.pen_ready_wait = 50
self.pen_setup_wait = 100
self.pen_taken_wait = 200
self.penalty_shoot_outs = 1
self.player_accel_max = 1
self.player_decay = 0.4
self.player_rand = 0.1
self.player_size = 0.3
self.player_speed_max = 1.2
self.player_weight = 60
self.point_to_ban = 5
self.point_to_duration = 20
self.port = 6000
self.prand_factor_l = 1
self.prand_factor_r = 1
self.profile = 0
self.proper_goal_kicks = 0
self.quantize_step = 0.1
self.quantize_step_l = 0.01
self.record_messages = 0
self.recover_dec = 0.002
self.recover_dec_thr = 0.3
self.recover_init = 1
self.recover_min = 0.5
self.recv_step = 10
self.say_coach_cnt_max = 128
self.say_coach_msg_size = 128
self.say_msg_size = 10
self.send_comms = 0
self.send_step = 150
self.send_vi_step = 100
self.sense_body_step = 100
self.simulator_step = 100
self.slow_down_factor = 1
self.slowness_on_top_for_left_team = 1
self.slowness_on_top_for_right_team = 1
self.stamina_inc_max = 45
self.stamina_max = 4000
self.start_goal_l = 0
self.start_goal_r = 0
self.stopped_ball_vel = 0.01
self.synch_micro_sleep = 1
self.synch_mode = 0
self.synch_offset = 60
self.tackle_back_dist = 0.5
self.tackle_cycles = 10
self.tackle_dist = 2
self.tackle_exponent = 6
self.tackle_power_rate = 0.027
self.tackle_width = 1
self.team_actuator_noise = 0
self.text_log_compression = 0
self.text_log_dated = 1
self.text_log_dir = "./"
self.text_log_fixed = 0
self.text_log_fixed_name = "rcssserver"
self.text_logging = 1
self.use_offside = 1
self.verbose = 0
self.visible_angle = 90
self.visible_distance = 3
self.wind_ang = 0
self.wind_dir = 0
self.wind_force = 0
self.wind_none = 0
self.wind_rand = 0
self.wind_random = 0
```
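The `rotate` helper above is the standard 2-D rotation formula; a quick sanity check (pure Python, no soccer server required):
```python
import math

# Rotating the point (1, 0) by 90 degrees (pi / 2 radians) around the origin
# should land on (0, 1), up to floating-point error.
qx, qy = rotate(origin=(0.0, 0.0), point=(1.0, 0.0), angle=math.pi / 2)
print(round(qx, 6), round(qy, 6))  # 0.0 1.0
```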
#### File: utils/environments/smac_utils.py
```python
from typing import Any, Optional
try:
from smac.env import StarCraft2Env
_found_smac = True
except ModuleNotFoundError:
_found_smac = False
from mava.wrappers import SMACWrapper
from mava.wrappers.env_preprocess_wrappers import (
ConcatAgentIdToObservation,
ConcatPrevActionToObservation,
)
if _found_smac:
def make_environment(
map_name: str = "3m",
concat_prev_actions: bool = True,
concat_agent_id: bool = True,
evaluation: bool = False,
random_seed: Optional[int] = None,
) -> Any:
env = StarCraft2Env(map_name=map_name, seed=random_seed)
env = SMACWrapper(env)
if concat_prev_actions:
env = ConcatPrevActionToObservation(env)
if concat_agent_id:
env = ConcatAgentIdToObservation(env)
return env
```
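A hypothetical usage sketch for the factory above, assuming the `smac` package and the StarCraft II binaries are installed (note that `make_environment` is only defined when the `smac` import succeeds):
```python
# Illustrative only; map_name "3m" is the default used above.
env = make_environment(map_name="3m", random_seed=42)
timestep, extras = env.reset()
print(env.possible_agents)  # e.g. ['agent_0', 'agent_1', 'agent_2']
```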
#### File: mava/utils/wrapper_utils.py
```python
import collections
from typing import Any, Dict, List, Tuple, Union
import dm_env
import numpy as np
from dm_env import specs
try:
from pettingzoo.utils.conversions import ParallelEnv
from pettingzoo.utils.env import AECEnv
_has_petting_zoo = True
except ModuleNotFoundError:
_has_petting_zoo = False
pass
# Need to install typing_extensions since we support pre python 3.8
from typing_extensions import TypedDict
from mava import types
SeqTimestepDict = TypedDict(
"SeqTimestepDict",
{"timestep": dm_env.TimeStep, "action": types.Action},
)
def convert_dm_compatible_observations(
observes: Dict,
dones: Dict[str, bool],
observation_spec: Dict[str, types.OLT],
env_done: bool,
possible_agents: List,
) -> Dict[str, types.OLT]:
"""Convert Parallel observation so it's dm_env compatible.
Args:
observes : observations per agent.
dones : dones per agent.
observation_spec : env observation spec.
env_done : is env done.
possible_agents : possible agents in env.
Returns:
a dm compatible observation.
"""
observations: Dict[str, types.OLT] = {}
for agent in possible_agents:
# If we have a valid observation for this agent.
if agent in observes:
observation = observes[agent]
if isinstance(observation, dict) and "action_mask" in observation:
legals = observation["action_mask"].astype(
observation_spec[agent].legal_actions.dtype
)
# Environments like flatland can return tuples for observations
if isinstance(observation_spec[agent].observation, tuple):
# Assuming tuples all have same type.
observation_dtype = observation_spec[agent].observation[0].dtype
else:
observation_dtype = observation_spec[agent].observation.dtype
observation = observation["observation"].astype(observation_dtype)
else:
# TODO Handle legal actions better for continuous envs,
# maybe have min and max for each action and clip the
# agents' actions accordingly
legals = np.ones(
observation_spec[agent].legal_actions.shape,
dtype=observation_spec[agent].legal_actions.dtype,
)
# If we have no observation, we need to use the default.
else:
# Handle tuple observations
if isinstance(observation_spec[agent].observation, tuple):
observation_spec_list = []
for obs_spec in observation_spec[agent].observation:
observation_spec_list.append(
np.zeros(
obs_spec.shape,
dtype=obs_spec.dtype,
)
)
observation = tuple(observation_spec_list) # type: ignore
else:
observation = np.zeros(
observation_spec[agent].observation.shape,
dtype=observation_spec[agent].observation.dtype,
)
legals = np.ones(
observation_spec[agent].legal_actions.shape,
dtype=observation_spec[agent].legal_actions.dtype,
)
if agent in dones:
terminal = dones[agent]
else:
terminal = env_done
observations[agent] = types.OLT(
observation=observation,
legal_actions=legals,
terminal=np.asarray([terminal], dtype=np.float32),
)
return observations
def generate_zeros_from_spec(spec: specs.Array) -> np.ndarray:
"""Generate zeros following a specific spec.
Args:
spec : data spec.
Returns:
a numpy array with all zeros according to spec.
"""
return np.zeros(spec.shape, spec.dtype)
def convert_np_type(dtype: np.dtype, value: Union[int, float]) -> Union[int, float]:
"""Converts value to np dtype.
Args:
dtype : numpy dtype.
value : value.
Returns:
converted value.
"""
return np.dtype(dtype).type(value)
def parameterized_restart(
reward: types.Reward,
discount: types.Discount,
observation: types.Observation,
) -> dm_env.TimeStep:
"""Returns an initial dm.TimeStep with `step_type` set to `StepType.FIRST`.
Differs from dm_env.restart, since reward and discount can be set to initial types.
Args:
reward : reward at restart.
discount : discount at restart.
observation : observation at restart.
Returns:
a dm.Timestep used for restarts.
"""
return dm_env.TimeStep(dm_env.StepType.FIRST, reward, discount, observation)
def parameterized_termination(
reward: types.Reward,
discount: types.Discount,
observation: types.Observation,
) -> dm_env.TimeStep:
"""Return a terminal dm.Timestep, with `step_type` set to `StepType.LAST`.
Args:
reward : reward at termination.
discount : discount at termination.
observation : observation at termination.
Returns:
a dm.Timestep used for terminal states.
"""
return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, observation)
def broadcast_timestep_to_all_agents(
timestep: dm_env.TimeStep, possible_agents: list
) -> dm_env.TimeStep:
"""Project single timestep to all agents."""
parallel_timestep = dm_env.TimeStep(
observation={agent: timestep.observation for agent in possible_agents},
reward={agent: timestep.reward for agent in possible_agents},
discount={agent: timestep.discount for agent in possible_agents},
step_type=timestep.step_type,
)
return parallel_timestep
def convert_seq_timestep_and_actions_to_parallel(
timesteps: Dict[str, SeqTimestepDict], possible_agents: list
) -> Tuple[dict, dm_env.TimeStep]:
"""Convert dict of seq timestep and actions to parallel"""
step_types = [timesteps[agent]["timestep"].step_type for agent in possible_agents]
assert all(
x == step_types[0] for x in step_types
), f"Step types should be identical - {step_types} "
parallel_timestep = dm_env.TimeStep(
observation={
agent: timesteps[agent]["timestep"].observation for agent in possible_agents
},
reward={
agent: timesteps[agent]["timestep"].reward for agent in possible_agents
},
discount={
agent: timesteps[agent]["timestep"].discount for agent in possible_agents
},
step_type=step_types[0],
)
parallel_actions = {agent: timesteps[agent]["action"] for agent in possible_agents}
return parallel_actions, parallel_timestep
def apply_env_wrapper_preprocessors(
environment: Any,
env_preprocess_wrappers: List,
) -> Any:
"""Apply env preprocessors to env.
Args:
environment : env.
env_preprocess_wrappers : env preprocessors.
Returns:
env after the preprocessors have been applied.
"""
# Currently only supports PZ envs.
if _has_petting_zoo and (
isinstance(environment, ParallelEnv) or isinstance(environment, AECEnv)
):
if env_preprocess_wrappers and isinstance(env_preprocess_wrappers, List):
for (env_wrapper, params) in env_preprocess_wrappers:
if params:
environment = env_wrapper(environment, **params)
else:
environment = env_wrapper(environment)
return environment
class RunningStatistics:
"""Helper class to comute running statistics such as
the max, min, mean, variance and standard deviation of
a specific quantity.
"""
# The queue_size is used to estimate a moving mean and variance value.
def __init__(self, label: str, queue_size: int = 100) -> None:
self.queue: collections.deque = collections.deque(maxlen=queue_size)
self._max = -float("inf")
self._min = float("inf")
self._mean = 0.0
self._var = 0.0
self._label = label
self._raw = 0.0
def push(self, x: float) -> None:
self._raw = x
self.queue.append(x)
if x > self._max:
self._max = x
if x < self._min:
self._min = x
if len(self.queue) == 1:
self._mean = x
self._var = 0
else:
self._mean = np.mean(self.queue)
self._var = np.var(self.queue)
def max(self) -> float:
return self._max
def min(self) -> float:
return self._min
def mean(self) -> float:
return self._mean
def var(self) -> float:
return self._var
def std(self) -> float:
return np.sqrt(self._var)
def raw(self) -> float:
return self._raw
# Adapted From https://github.com/DLR-RM/stable-baselines3/blob/237223f834fe9b8143ea24235d087c4e32addd2f/stable_baselines3/common/running_mean_std.py # noqa: E501
class RunningMeanStd(object):
def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = ()):
"""
Calculates the running mean and std of a data stream
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param epsilon: helps with arithmetic issues
:param shape: the shape of the data stream's output
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update_batch(self, arr: np.ndarray) -> None:
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update(self, arr: np.ndarray) -> None:
batch_mean = np.mean(arr)
batch_var = np.var(arr)
batch_count = 1
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(
self,
batch_mean: np.ndarray,
batch_var: np.ndarray,
batch_count: int,
) -> None:
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = (
m_a
+ m_b
+ np.square(delta) * self.count * batch_count / (self.count + batch_count)
)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
```
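A minimal sketch of how `RunningMeanStd` might be used to normalise a stream of batched observations; the small epsilon added to the variance is an assumption for numerical safety, not part of the class:
```python
import numpy as np

rms = RunningMeanStd(shape=(3,))
for _ in range(10):
    batch = np.random.randn(32, 3)  # a batch of 3-dimensional observations
    rms.update_batch(batch)         # update the running mean/var from the batch

normalised = (batch - rms.mean) / np.sqrt(rms.var + 1e-8)
```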
#### File: mava/wrappers/smac.py
```python
from typing import Any, Dict, List, Optional, Union
import dm_env
import numpy as np
from acme import specs
from smac.env import StarCraft2Env
from mava import types
from mava.utils.wrapper_utils import convert_np_type, parameterized_restart
from mava.wrappers.env_wrappers import ParallelEnvWrapper
class SMACWrapper(ParallelEnvWrapper):
"""Environment wrapper for PettingZoo MARL environments."""
def __init__(
self,
environment: StarCraft2Env,
return_state_info: bool = True,
):
"""Constructor for parallel PZ wrapper.
Args:
environment (ParallelEnv): parallel PZ env.
env_preprocess_wrappers (Optional[List], optional): Wrappers
that preprocess envs.
Format (env_preprocessor, dict_with_preprocessor_params).
return_state_info: return extra state info
"""
self._environment = environment
self._return_state_info = return_state_info
self._agents = [f"agent_{n}" for n in range(self._environment.n_agents)]
self._reset_next_step = True
self._done = False
def reset(self) -> dm_env.TimeStep:
"""Resets the env.
Returns:
dm_env.TimeStep: dm timestep.
"""
# Reset the environment
self._environment.reset()
self._done = False
self._reset_next_step = False
self._step_type = dm_env.StepType.FIRST
# Get observation from env
observation = self.environment.get_obs()
legal_actions = self._get_legal_actions()
observations = self._convert_observations(
observation, legal_actions, self._done
)
# Set env discount to 1 for all agents
discount_spec = self.discount_spec()
self._discounts = {
agent: convert_np_type(discount_spec[agent].dtype, 1)
for agent in self._agents
}
# Set reward to zero for all agents
rewards_spec = self.reward_spec()
rewards = {
agent: convert_np_type(rewards_spec[agent].dtype, 0)
for agent in self._agents
}
# Possibly add state information to extras
if self._return_state_info:
state = self.get_state()
extras = {"s_t": state}
else:
extras = {}
return parameterized_restart(rewards, self._discounts, observations), extras
def step(self, actions: Dict[str, np.ndarray]) -> dm_env.TimeStep:
"""Steps in env.
Args:
actions (Dict[str, np.ndarray]): actions per agent.
Returns:
dm_env.TimeStep: dm timestep
"""
# Possibly reset the environment
if self._reset_next_step:
return self.reset()
# Convert dict of actions to list for SMAC
smac_actions = list(actions.values())
# Step the SMAC environment
reward, self._done, self._info = self._environment.step(smac_actions)
# Get the next observations
next_observations = self._environment.get_obs()
legal_actions = self._get_legal_actions()
next_observations = self._convert_observations(
next_observations, legal_actions, self._done
)
# Convert team reward to agent-wise rewards
rewards = self._convert_reward(reward)
# Possibly add state information to extras
if self._return_state_info:
state = self.get_state()
extras = {"s_t": state}
else:
extras = {}
if self._done:
self._step_type = dm_env.StepType.LAST
self._reset_next_step = True
# Discount on last timestep set to zero
self._discounts = {
agent: convert_np_type(self.discount_spec()[agent].dtype, 0.0)
for agent in self._agents
}
else:
self._step_type = dm_env.StepType.MID
# Create timestep object
timestep = dm_env.TimeStep(
observation=next_observations,
reward=rewards,
discount=self._discounts,
step_type=self._step_type,
)
return timestep, extras
def env_done(self) -> bool:
"""Check if env is done.
Returns:
bool: bool indicating if env is done.
"""
return self._done
def _convert_reward(self, reward: float) -> Dict[str, float]:
"""Convert rewards to be dm_env compatible.
Args:
reward: rewards per agent.
"""
rewards_spec = self.reward_spec()
rewards = {}
for agent in self._agents:
rewards[agent] = convert_np_type(rewards_spec[agent].dtype, reward)
return rewards
def _get_legal_actions(self) -> List:
"""Get legal actions from the environment."""
legal_actions = []
for i, _ in enumerate(self._agents):
legal_actions.append(
np.array(self._environment.get_avail_agent_actions(i), dtype="int")
)
return legal_actions
def _convert_observations(
self, observations: List, legal_actions: List, done: bool
) -> types.Observation:
"""Convert SMAC observation so it's dm_env compatible.
Args:
observations (List): raw observations per agent.
legal_actions (List): legal action masks per agent.
done (bool): whether the episode has terminated.
Returns:
types.Observation: dm compatible observations.
"""
olt_observations = {}
for i, agent in enumerate(self._agents):
olt_observations[agent] = types.OLT(
observation=observations[i],
legal_actions=legal_actions[i],
terminal=np.asarray([done], dtype=np.float32),
)
return olt_observations
def extra_spec(self) -> Dict[str, specs.BoundedArray]:
"""Function returns extra spec (format) of the env.
Returns:
Dict[str, specs.BoundedArray]: extra spec.
"""
if self._return_state_info:
return {"s_t": self._environment.get_state()}
else:
return {}
def observation_spec(self) -> Dict[str, types.OLT]:
"""Observation spec.
Returns:
types.Observation: spec for environment.
"""
self._environment.reset()
observations = self._environment.get_obs()
legal_actions = self._get_legal_actions()
observation_specs = {}
for i, agent in enumerate(self._agents):
observation_specs[agent] = types.OLT(
observation=observations[i],
legal_actions=legal_actions[i],
terminal=np.asarray([True], dtype=np.float32),
)
return observation_specs
def action_spec(
self,
) -> Dict[str, Union[specs.DiscreteArray, specs.BoundedArray]]:
"""Action spec.
Returns:
spec for actions.
"""
action_specs = {}
for agent in self._agents:
action_specs[agent] = specs.DiscreteArray(
num_values=self._environment.n_actions, dtype=int
)
return action_specs
def reward_spec(self) -> Dict[str, specs.Array]:
"""Reward spec.
Returns:
Dict[str, specs.Array]: spec for rewards.
"""
reward_specs = {}
for agent in self._agents:
reward_specs[agent] = specs.Array((), np.float32)
return reward_specs
def discount_spec(self) -> Dict[str, specs.BoundedArray]:
"""Discount spec.
Returns:
Dict[str, specs.BoundedArray]: spec for discounts.
"""
discount_specs = {}
for agent in self._agents:
discount_specs[agent] = specs.BoundedArray(
(), np.float32, minimum=0, maximum=1.0
)
return discount_specs
def get_stats(self) -> Optional[Dict]:
"""Return extra stats to be logged.
Returns:
extra stats to be logged.
"""
return self._environment.get_stats()
@property
def agents(self) -> List:
"""Agents still alive in env (not done).
Returns:
List: alive agents in env.
"""
return self._agents
@property
def possible_agents(self) -> List:
"""All possible agents in env.
Returns:
List: all possible agents in env.
"""
return self._agents
@property
def environment(self) -> StarCraft2Env:
"""Returns the wrapped environment.
Returns:
StarCraft2Env: the wrapped SMAC environment.
"""
return self._environment
def __getattr__(self, name: str) -> Any:
"""Expose any other attributes of the underlying environment.
Args:
name (str): attribute.
Returns:
Any: return attribute from env or underlying env.
"""
if hasattr(self.__class__, name):
return self.__getattribute__(name)
else:
return getattr(self._environment, name)
```
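A hypothetical interaction loop with the wrapper above, assuming SMAC is installed; the random legal-action selection is illustrative only:
```python
import numpy as np
from smac.env import StarCraft2Env

env = SMACWrapper(StarCraft2Env(map_name="3m"))
timestep, extras = env.reset()
while not env.env_done():
    # Pick a random legal action for every agent from its legal-action mask.
    actions = {
        agent: np.random.choice(
            np.nonzero(timestep.observation[agent].legal_actions)[0]
        )
        for agent in env.possible_agents
    }
    timestep, extras = env.step(actions)
```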
#### File: tests/utils/config_utils_test.py
```python
from typing import MutableMapping
import pytest
from mava.utils.config_utils import flatten_dict
@pytest.fixture
def test_mock_nested_dict() -> MutableMapping:
"""Mock nested dictionary."""
return {"a": 1, "c": {"a": 2, "b": {"x": 5, "y": 10}}, "d": [1, 2, 3]}
def test_flatten_dict(test_mock_nested_dict: MutableMapping) -> None:
"""Tests flatten_dict."""
assert flatten_dict(d=test_mock_nested_dict) == {
"a": 1,
"c.a": 2,
"c.b.x": 5,
"c.b.y": 10,
"d": [1, 2, 3],
}
def test_flatten_dict_different_sep(test_mock_nested_dict: MutableMapping) -> None:
"""Tests flatten dict with specified sep token."""
assert flatten_dict(d=test_mock_nested_dict, sep="_") == {
"a": 1,
"c_a": 2,
"c_b_x": 5,
"c_b_y": 10,
"d": [1, 2, 3],
}
def test_flatten_dict_with_parent_key(test_mock_nested_dict: MutableMapping) -> None:
"""Tests flatten dict with specified parent key."""
assert flatten_dict(d=test_mock_nested_dict, parent_key="test_parent") == {
"test_parent.a": 1,
"test_parent.c.a": 2,
"test_parent.c.b.x": 5,
"test_parent.c.b.y": 10,
"test_parent.d": [1, 2, 3],
}
``` |
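The tests above exercise `flatten_dict` from `mava.utils.config_utils`, which is not shown here. A sketch consistent with these assertions (the real implementation may differ) is:
```python
import collections.abc
from typing import MutableMapping

def flatten_dict(
    d: MutableMapping, parent_key: str = "", sep: str = "."
) -> MutableMapping:
    """Flatten a nested mapping into sep-joined keys, e.g. {"c": {"a": 2}} -> {"c.a": 2}."""
    items: list = []
    for key, value in d.items():
        new_key = parent_key + sep + key if parent_key else key
        if isinstance(value, collections.abc.MutableMapping):
            # Recurse into nested mappings, carrying the joined key as the parent.
            items.extend(flatten_dict(value, parent_key=new_key, sep=sep).items())
        else:
            items.append((new_key, value))
    return dict(items)
```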
{
"source": "1999foxes/danmuvis",
"score": 3
} |
#### File: danmuvis/danmuvis/danmaku.py
```python
import xml.sax
import json
import os.path
import ahocorasick
from .danmaku2ass import Danmaku2ASS
def make_AC(AC, word_set):
for word in word_set:
AC.add_word(word, word)
return AC
AC_funny = ahocorasick.Automaton()
key_list_funny = ['233', '哈', 'hhh', '草', '??', '??']
make_AC(AC_funny, set(key_list_funny))
AC_funny.make_automaton()
AC_exciting = ahocorasick.Automaton()
key_list_exciting = ['666', '强', 'oh']
make_AC(AC_exciting, set(key_list_exciting))
AC_exciting.make_automaton()
AC_lovely = ahocorasick.Automaton()
key_list_lovely = ['awsl', 'kksk', '切片', '??', '??', 'hso']
make_AC(AC_lovely, set(key_list_lovely))
AC_lovely.make_automaton()
class Danmaku:
def __init__(self, filename, path):
self.filename = filename
self.path = path
self.highlight = None
def generateHighlight(self):
self.highlight = {'density': [], 'funny': [], 'exciting': [], 'lovely': []}
self.decodeDanmakuXML(self.calcHighlight)
with open(os.path.join(self.path, self.filename).replace('.xml', '.json'), 'w') as highlight_file:
json.dump(self.highlight, highlight_file)
def calcHighlight(self, d):
# print(d)
interval = 10
for key in self.highlight:
while len(self.highlight[key]) <= d.time // interval:
self.highlight[key].append(0)
self.highlight['density'][-1] += 1
name_list = list(AC_funny.iter(d.content))
if len(name_list) > 0:
self.highlight['funny'][-1] += 1
name_list = list(AC_exciting.iter(d.content))
if len(name_list) > 0:
self.highlight['exciting'][-1] += 1
name_list = list(AC_lovely.iter(d.content))
if len(name_list) > 0:
self.highlight['lovely'][-1] += 1
def generateASS(self):
Danmaku2ASS(os.path.join(self.path, self.filename), 'Bilibili',
os.path.join(self.path, self.filename).replace('.xml', '.ass'),
1280, 720,
font_size=30, text_opacity=0.8)
class D:
def __init__(self, p, user, content):
attr = p.split(',')
self.time = float(attr[0])
self.user = user
self.content = content
def __str__(self):
return ','.join([str(self.time), self.user, self.content])
class DanmakuHandler(xml.sax.ContentHandler):
def __init__(self, handler):
self.handler = handler
self.p = None
self.user = None
self.content = None
def startElement(self, tag, attributes):
if tag == "d":
self.p = attributes['p']
self.user = attributes['user']
def endElement(self, tag):
if tag == "d":
self.handler(Danmaku.D(self.p, self.user, self.content))
def characters(self, content):
self.content = content
def decodeDanmakuXML(self, handler):
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
danmakuHandler = Danmaku.DanmakuHandler(handler)
parser.setContentHandler(danmakuHandler)
parser.parse(os.path.join(self.path, self.filename))
if __name__ == '__main__':
danmaku = Danmaku(r"C:\my_code\danmuvis\resource\白雪艾莉娅_Official.20210504.【突击直播】一起来修仙.xml", '')
danmaku.generateHighlight()
print(danmaku.highlight)
``` |
{
"source": "1999foxes/run-cmd-from-websocket",
"score": 3
} |
#### File: 1999foxes/run-cmd-from-websocket/main.py
```python
import asyncio
import json
import logging
import websockets
logging.basicConfig()
# Track connected clients so each socket can be discarded cleanly on disconnect.
USERS = set()
async def counter(websocket, path):
    try:
        print("connect")
        USERS.add(websocket)
        async for message in websocket:
            print(message)
    finally:
        USERS.remove(websocket)
async def main():
async with websockets.serve(counter, "localhost", 5000):
await asyncio.Future() # run forever
if __name__ == "__main__":
asyncio.run(main())
``` |
{
"source": "1999michael/tinyml",
"score": 3
} |
#### File: mcunet/jobs/models.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
                                                F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet_3layer(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet_3layer, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, int(out.size()[3]))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNet_2layer(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet_2layer, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.bn2 = nn.BatchNorm2d(32)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.linear = nn.Linear(32, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = F.avg_pool2d(out, int(out.size()[3]))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNet_1layer(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet_1layer, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.linear = nn.Linear(16, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = F.avg_pool2d(out, int(out.size()[3]))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
resnet_layers_dict = {1: ResNet_1layer, 2:ResNet_2layer, 3:ResNet_3layer}
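# Map the num_layers argument (number of residual stages, 1-3) to the matching network class.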
def ResNet20(num_classes=8, num_layers=3):
return resnet_layers_dict[num_layers](BasicBlock, [3, 3, 3], num_classes=num_classes)
def ResNet32(num_classes=8, num_layers=3):
return resnet_layers_dict[num_layers](BasicBlock, [5, 5, 5], num_classes=num_classes)
def ResNet44(num_classes=8, num_layers=3):
return resnet_layers_dict[num_layers](BasicBlock, [7, 7, 7], num_classes=num_classes)
def ResNet56(num_classes=8, num_layers=3):
return resnet_layers_dict[num_layers](BasicBlock, [9, 9, 9], num_classes=num_classes)
class Small(nn.Module):
def __init__(self, num_classes=8):
super(Small, self).__init__()
self.conv1 = nn.Conv2d(3, 9, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(9, 17, 2)
self.fc1 = nn.Linear(17 * 27 * 27, 32)
self.fc2 = nn.Linear(32, num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 17 * 27 * 27)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = x.squeeze(1)
return x
models_dict = {"small": Small, "resnet20": ResNet20, "resnet32": ResNet32, "resnet44": ResNet44, "resnet56": ResNet56}
```
#### File: mcunet/utils/net_config.py
```python
import torch
import torch.nn as nn
__all__ = ['get_network_config_with_activation_shape']
def record_in_out_shape(m, x, y):
x = x[0]
m.input_shape = torch.Tensor(list(x.shape))
m.output_shape = torch.Tensor(list(y.shape))
def record_residual_shape(m, x, y):
from tinynas.nn.modules import ZeroLayer
if m.mobile_inverted_conv is None or isinstance(m.mobile_inverted_conv, ZeroLayer):
pass
elif m.shortcut is None or isinstance(m.shortcut, ZeroLayer):
pass
else: # only record the shape if we actually use the residual connection
m.output_shape = torch.Tensor(list(y.shape))
def add_activation_shape_hook(m_):
from tinynas.nn.networks import MobileInvertedResidualBlock
m_type = type(m_)
if m_type == nn.Conv2d:
m_.register_buffer('input_shape', torch.zeros(4))
m_.register_buffer('output_shape', torch.zeros(4))
m_.register_forward_hook(record_in_out_shape)
elif m_type == MobileInvertedResidualBlock:
m_.register_buffer('output_shape', torch.zeros(4))
m_.register_forward_hook(record_residual_shape)
def get_network_config_with_activation_shape(model, device='cpu', data_shape=(1, 3, 224, 224)):
from tinynas.nn.networks import ProxylessNASNets
assert isinstance(model, ProxylessNASNets)
import copy
model = copy.deepcopy(model).to(device)
model.eval()
model.apply(add_activation_shape_hook)
with torch.no_grad():
_ = model(torch.randn(*data_shape).to(device))
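    # After this single dummy forward pass the registered hooks have filled the
    # input_shape / output_shape buffers that the helper functions below read out.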
def get_conv_cfg(conv):
conv = conv.conv
return {
'in_channel': int(conv.input_shape[1]),
'in_shape': int(conv.input_shape[2]),
'out_channel': int(conv.output_shape[1]),
'out_shape': int(conv.output_shape[2]),
'kernel_size': conv.kernel_size[0],
'stride': conv.stride[0],
'groups': conv.groups,
'depthwise': conv.groups == int(conv.input_shape[1]),
}
def get_linear_cfg(op):
return {
'in_channel': op.in_features,
'out_channel': op.out_features,
}
def get_block_cfg(block):
# (inverted_bottleneck), depth_conv, point_linear
pdp = block.mobile_inverted_conv
if int(block.output_shape[0]) == 0:
residual = None
else:
assert block.output_shape[2] == block.output_shape[3]
residual = {'in_channel': int(block.output_shape[1]), 'in_shape': int(block.output_shape[2])}
return {
'pointwise1': get_conv_cfg(pdp.inverted_bottleneck) if pdp.inverted_bottleneck is not None else None,
'depthwise': get_conv_cfg(pdp.depth_conv),
'pointwise2': get_conv_cfg(pdp.point_linear),
'residual': residual
}
cfg = {}
cfg['first_conv'] = get_conv_cfg(model.first_conv)
cfg['classifier'] = get_linear_cfg(model.classifier)
# assert input_model.feature_mix_layer is None
if model.feature_mix_layer is not None:
cfg['feature_mix'] = get_conv_cfg(model.feature_mix_layer)
else:
cfg['feature_mix'] = None
block_cfg = []
# now check input_model.blocks
for block in model.blocks:
from tinynas.nn.modules import ZeroLayer
if block.mobile_inverted_conv is None or isinstance(block.mobile_inverted_conv, ZeroLayer): # empty block
continue
block_cfg.append(get_block_cfg(block))
cfg['blocks'] = block_cfg
del model
return cfg
``` |
{
"source": "199-cmd/FlaskDemo",
"score": 3
} |
#### File: FlaskDemo/FlaskDemo/main.py
```python
from flask import Flask,render_template,json,flash,request,session,redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
with open('config.json', 'r') as c:
parameter = json.load(c)["parameter"]
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = parameter['local_uri']
app.secret_key = 'super-secret-key'
db = SQLAlchemy(app)
class Contact(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
email = db.Column(db.String(20), nullable=False)
phone = db.Column(db.String(12), nullable=False)
message = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(12), nullable=True)
@app.route('/')
def home():
return render_template('index.html',parameter=parameter)
@app.route("/contact", methods = ['GET', 'POST'])
def contact():
if(request.method=='POST'):
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
entry = Contact(name=name, email = email, phone = phone, message = message, date= datetime.now())
db.session.add(entry)
db.session.commit()
flash("Thank You We will get back to you soon...","success")
return render_template('index.html',parameter=parameter)
``` |
{
"source": "19ahmed99/l2rpn_opponent_modelling",
"score": 2
} |
#### File: Baseline/DoubleDuelingDQN/DoubleDuelingDQNConfig.py
```python
import os
import json
class DoubleDuelingDQNConfig():
"""
DoubleDuelingDQN configurable hyperparameters
exposed as class attributes
"""
LR_DECAY_STEPS = 1024*64
LR_DECAY_RATE = 0.95
INITIAL_EPSILON = 0.99
FINAL_EPSILON = 0.001
DECAY_EPSILON = 1024*64
DISCOUNT_FACTOR = 0.98
PER_CAPACITY = 1024*64
PER_ALPHA = 0.7
PER_BETA = 0.5
UPDATE_FREQ = 28
UPDATE_TARGET_HARD_FREQ = -1
UPDATE_TARGET_SOFT_TAU = 1e-3
N_FRAMES = 4
BATCH_SIZE = 32
LR = 1e-5
VERBOSE = True
@staticmethod
def from_json(json_in_path):
with open(json_in_path, 'r') as fp:
conf_json = json.load(fp)
for k,v in conf_json.items():
if hasattr(DoubleDuelingDQNConfig, k):
setattr(DoubleDuelingDQNConfig, k, v)
@staticmethod
def to_json(json_out_path):
conf_json = {}
for attr in dir(DoubleDuelingDQNConfig):
if attr.startswith('__') or callable(attr):
continue
conf_json[attr] = getattr(DoubleDuelingDQNConfig, attr)
with open(json_out_path, 'w+') as fp:
            json.dump(conf_json, fp, indent=2)
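# Hypothetical usage sketch (the file names below are illustrative, not part of this module):
#   DoubleDuelingDQNConfig.from_json("hyperparams.json")    # override class attributes from a file
#   DoubleDuelingDQNConfig.to_json("hyperparams_out.json")  # persist the current values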
``` |
{
"source": "19bcs2410/flask_project_chat_web",
"score": 3
} |
#### File: 19bcs2410/flask_project_chat_web/server_test.py
```python
from flask import Flask,request,render_template,url_for,redirect
from flask_socketio import SocketIO,leave_room,join_room
clients_list={}
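# clients_list maps a room id to the list of usernames currently joined to that room.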
app=Flask(__name__)
socketio=SocketIO(app)
@app.route('/')
def homepage():
return render_template('input1.html')
@app.route('/again_join_url/<username>' ,methods=['GET','POST'])
def again_join_url(username):
return render_template('again_input.html',username=username)
@socketio.on('join_room')
def handle_join_room(data):
if clients_list.get(data['room'],0)==0:
clients_list[data['room']]=[]
if data['username'] not in clients_list[data['room']]:
clients_list[data['room']].append(data['username'])
else:
        socketio.emit('already', {'username': data['username']})
join_room(data['room'])
print('join {} '.format(data['username']))
socketio.emit('room_joined', data, room=data['room'])
@socketio.on('leave_room')
def leave_room_handler(data):
leave_room(data['room'])
clients_list[data['room']].remove(data['username'])
socketio.emit('leaved_room',{'room':data['room'],'username':data['username']})
@socketio.on('show_online')
def show_online(data):
room_no=data['room']
str_data='<br>'.join(clients_list[room_no])
    socketio.emit('online_result', {'result': str_data, 'username': data['username']}, room=data['room'])
@socketio.on('send_message')
def hand_client_message(data):
socketio.emit('receive_msg',data,room=data['room'])
@app.route('/input',methods=['GET','POST'])
def input():
username=request.form['username']
roomid=request.form['roomid']
if username and roomid:
return render_template('chat1.html',username=username,room=roomid)
else:
return redirect(url_for('homepage'))
if __name__=="__main__":
    socketio.run(app, debug=True, host='0.0.0.0')
``` |
{
"source": "19bcs2410/flask_updated-web-chat",
"score": 3
} |
#### File: 19bcs2410/flask_updated-web-chat/new_server.py
```python
import socketio
sio = socketio.Client()
@sio.event
def connect():
print('connection established')
@sio.event
def my_message(data):
print('message received with ', data)
sio.emit('my response', {'response': 'my response'})
@sio.event
def disconnect():
print('disconnected from server')
sio.connect('http://localhost:5000')
sio.wait()
``` |
{
"source": "19bcs2410/social-media-plateform-SocialHUb",
"score": 2
} |
#### File: social-media-plateform-SocialHUb/New folder (2)/test.py
```python
from flask import Flask,render_template,request
from flask_socketio import SocketIO, emit, join_room, leave_room
import os
app = Flask(__name__)
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
socketio = SocketIO(app)
sid={}
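# sid maps a username to the Socket.IO session id of that user's connection.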
@app.route('/<name>')
def homepage(name):
global my_name
my_name=name
return render_template('test.html')
@socketio.on('connect')
def connected():
print('connect')
sid[my_name]=request.sid
    join_room('rahul1')
print(sid)
@socketio.on('video_call')
def new_connection(data):
emit('new_user',data,room=sid['rahul'])
@socketio.on('disconnect')
def disconnect():
print('disconnect')
@socketio.on('cancel_video')
def cancel():
emit('cancel_req',broadcast=True)
if __name__ == '__main__':
socketio.run(app,debug=True)
``` |
{
"source": "19dufbri/hackathon",
"score": 3
} |
#### File: hackathon/app/app.py
```python
import base64
import csv
import json
from flask import Flask, render_template, url_for, request, jsonify, redirect
import matplotlib.pyplot as plt
from io import BytesIO
from flask import jsonify
import numpy as np
app = Flask(__name__)
finalData = []
names = []
@app.route('/')
def index():
return render_template("index.html")
@app.route('/results')
def results():
results = []
if (request.cookies.get("college-input-data") is None):
return redirect("/", code=302)
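    # The "college-input-data" cookie is presumably set by the front-end form and
    # carries the user's questionnaire answers plus the selected college names.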
jsondata = json.loads(request.cookies.get("college-input-data"))
for college in finalData:
if college["INSTNM"] in jsondata["collegeNames"]:
results.append(getPersonalData(jsondata, college))
return render_template("results.html", numres=len(results), results=results)
@app.route('/autocomplete',methods=['GET'])
def autocomplete():
search = request.args.get('term')
app.logger.debug(search)
return jsonify(json_list=names)
def getPersonalData(json, college):
college["picture"] = makePie(college["racepercent"])
if json["degree"] == "1" or json["degree"] == "2":
college["comp"] = int(100 * getFloat(college, "C150_L4"))
college["reten"] = int(100 * getFloat(college, "RET_FTL4"))
else:
college["comp"] = int(100 * getFloat(college, "C150_4"))
college["reten"] = int(100 * getFloat(college, "RET_FT4"))
incomeNum = json["income"]
tuition = college["price"]
college["net"] = tuition[incomeNum]
college["pell"] = int(100 * getFloat(college, "PCTPELL"))
college["loan"] = int(100 * getFloat(college, "PCTFLOAN"))
if college["TUITIONFEE_PROG"] == "NULL":
if college["STABBR"] == json["state"]:
college["tuition"] = getString(college, "TUITIONFEE_IN")
else:
college["tuition"] = getString(college, "TUITIONFEE_OUT")
else:
college["tuition"] = getString(college, "TUITIONFEE_PROG")
sumelems = 0
numelems = 0
if json["income"] == 1:
numelems += 1
sumelems += getFloat(college, "LO_INC_DEBT_MDN")
elif json["income"] == 2 or json["income"] == 3:
numelems += 1
sumelems += getFloat(college, "MD_INC_DEBT_MDN")
elif json["income"] == 4 or json["income"] == 5:
numelems += 1
sumelems += getFloat(college, "HI_INC_DEBT_MDN")
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "DEP_DEBT_MDN")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "IND_DEBT_MDN")
if json["pell"] == "yes":
numelems += 1
sumelems += getFloat(college, "PELL_DEBT_MDN")
elif json["pell"] == "no":
numelems += 1
sumelems += getFloat(college, "NOPELL_DEBT_MDN")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MALE_DEBT_MDN")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "FEMALE_DEBT_MDN")
if json["firstGen"] == "yes":
numelems += 1
sumelems += getFloat(college, "FIRSTGEN_DEBT_MDN")
elif json["firstGen"] == "no":
numelems += 1
sumelems += getFloat(college, "NOTFIRSTGEN_DEBT_MDN")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "GRAD_DEBT_MDN")
college["debt"] = int(sumelems / numelems)
sumelems = 0
numelems = 0
if json["income"] == 1:
numelems += 1
sumelems += getFloat(college, "LO_INC_RPY_1YR_RT")
elif json["income"] == 2 or json["income"] == 3:
numelems += 1
sumelems += getFloat(college, "MD_INC_RPY_1YR_RT")
elif json["income"] == 4 or json["income"] == 5:
numelems += 1
sumelems += getFloat(college, "HI_INC_RPY_1YR_RT")
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "DEP_RPY_1YR_RT")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "IND_RPY_1YR_RT")
if json["pell"] == "yes":
numelems += 1
sumelems += getFloat(college, "PELL_RPY_1YR_RT")
elif json["pell"] == "no":
numelems += 1
sumelems += getFloat(college, "NOPELL_RPY_1YR_RT")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MALE_RPY_1YR_RT")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "FEMALE_RPY_1YR_RT")
if json["firstGen"] == "yes":
numelems += 1
sumelems += getFloat(college, "FIRSTGEN_RPY_1YR_RT")
elif json["firstGen"] == "no":
numelems += 1
sumelems += getFloat(college, "NOTFIRSTGEN_RPY_1YR_RT")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "COMPL_RPY_1YR_RT")
college["one"] = int(100 * sumelems / numelems)
sumelems = 0
numelems = 0
if json["income"] == 1:
numelems += 1
sumelems += getFloat(college, "LO_INC_RPY_3YR_RT")
elif json["income"] == 2 or json["income"] == 3:
numelems += 1
sumelems += getFloat(college, "MD_INC_RPY_3YR_RT")
elif json["income"] == 4 or json["income"] == 5:
numelems += 1
sumelems += getFloat(college, "HI_INC_RPY_3YR_RT")
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "DEP_RPY_3YR_RT")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "IND_RPY_3YR_RT")
if json["pell"] == "yes":
numelems += 1
sumelems += getFloat(college, "PELL_RPY_3YR_RT")
elif json["pell"] == "no":
numelems += 1
sumelems += getFloat(college, "NOPELL_RPY_3YR_RT")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MALE_RPY_3YR_RT")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "FEMALE_RPY_3YR_RT")
if json["firstGen"] == "yes":
numelems += 1
sumelems += getFloat(college, "FIRSTGEN_RPY_3YR_RT")
elif json["firstGen"] == "no":
numelems += 1
sumelems += getFloat(college, "NOTFIRSTGEN_RPY_3YR_RT")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "GRAD_RPY_3YR_RT")
college["three"] = int(100 * sumelems / numelems)
sumelems = 0
numelems = 0
if json["income"] == 1:
numelems += 1
sumelems += getFloat(college, "LO_INC_RPY_5YR_RT")
elif json["income"] == 2 or json["income"] == 3:
numelems += 1
sumelems += getFloat(college, "MD_INC_RPY_5YR_RT")
elif json["income"] == 4 or json["income"] == 5:
numelems += 1
sumelems += getFloat(college, "HI_INC_RPY_5YR_RT")
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "DEP_RPY_5YR_RT")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "IND_RPY_5YR_RT")
if json["pell"] == "yes":
numelems += 1
sumelems += getFloat(college, "PELL_RPY_5YR_RT")
elif json["pell"] == "no":
numelems += 1
sumelems += getFloat(college, "NOPELL_RPY_5YR_RT")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MALE_RPY_5YR_RT")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "FEMALE_RPY_5YR_RT")
if json["firstGen"] == "yes":
numelems += 1
sumelems += getFloat(college, "FIRSTGEN_RPY_5YR_RT")
elif json["firstGen"] == "no":
numelems += 1
sumelems += getFloat(college, "NOTFIRSTGEN_RPY_5YR_RT")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "COMPL_RPY_5YR_RT")
college["five"] = int(100 * sumelems / numelems)
sumelems = 0
numelems = 0
if json["income"] == 1:
numelems += 1
sumelems += getFloat(college, "LO_INC_RPY_7YR_RT")
elif json["income"] == 2 or json["income"] == 3:
numelems += 1
sumelems += getFloat(college, "MD_INC_RPY_7YR_RT")
elif json["income"] == 4 or json["income"] == 5:
numelems += 1
sumelems += getFloat(college, "HI_INC_RPY_7YR_RT")
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "DEP_RPY_7YR_RT")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "IND_RPY_7YR_RT")
if json["pell"] == "yes":
numelems += 1
sumelems += getFloat(college, "PELL_RPY_7YR_RT")
elif json["pell"] == "no":
numelems += 1
sumelems += getFloat(college, "NOPELL_RPY_7YR_RT")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MALE_RPY_7YR_RT")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "FEMALE_RPY_7YR_RT")
if json["firstGen"] == "yes":
numelems += 1
sumelems += getFloat(college, "FIRSTGEN_RPY_7YR_RT")
elif json["firstGen"] == "no":
numelems += 1
sumelems += getFloat(college, "NOTFIRSTGEN_RPY_7YR_RT")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "COMPL_RPY_7YR_RT")
college["seven"] = int(100 * sumelems / numelems)
if json["degree"] == 1 or json["degree"] == 2:
if json["race"] == "white":
college["estim"] = getFloat(college, "C150_L4_WHITE")
elif json["race"] == "black":
college["estim"] = getFloat(college, "C150_L4_BLACK")
elif json["race"] == "hispanic":
college["estim"] = getFloat(college, "C150_L4_HISP")
elif json["race"] == "asian":
college["estim"] = getFloat(college, "C150_L4_ASIAN")
elif json["race"] == "northNative":
college["estim"] = getFloat(college, "C150_L4_AIAN")
elif json["race"] == "southNative":
college["estim"] = getFloat(college, "C150_L4_NHPI")
elif json["race"] == "twoOrMore":
college["estim"] = getFloat(college, "C150_L4_2MOR")
elif json["race"] == "nonResAlien":
college["estim"] = getFloat(college, "C150_L4_NRA")
elif json["race"] == "unknown":
college["estim"] = getFloat(college, "C150_L4_UNKN")
else:
college["estim"] = getFloat(college, "C150_L4")
else:
if json["race"] == "white":
college["estim"] = getFloat(college, "C150_4_WHITE")
elif json["race"] == "black":
college["estim"] = getFloat(college, "C150_4_BLACK")
elif json["race"] == "hispanic":
college["estim"] = getFloat(college, "C150_4_HISP")
elif json["race"] == "asian":
college["estim"] = getFloat(college, "C150_4_ASIAN")
elif json["race"] == "northNative":
college["estim"] = getFloat(college, "C150_4_AIAN")
elif json["race"] == "southNative":
college["estim"] = getFloat(college, "C150_4_NHPI")
elif json["race"] == "twoOrMore":
college["estim"] = getFloat(college, "C150_4_2MOR")
elif json["race"] == "nonResAlien":
college["estim"] = getFloat(college, "C150_4_NRA")
elif json["race"] == "unknown":
college["estim"] = getFloat(college, "C150_4_UNKN")
else:
college["estim"] = getFloat(college, "C150_4")
college["estim"] = int(college["estim"] * 100)
sumelems = 0
numelems = 0
if json["dependent"] == "yes":
numelems += 1
sumelems += getFloat(college, "MN_EARN_WNE_INDEP0_P10")
elif json["dependent"] == "no":
numelems += 1
sumelems += getFloat(college, "MN_EARN_WNE_INDEP1_P10")
if json["sex"] == "male":
numelems += 1
sumelems += getFloat(college, "MN_EARN_WNE_MALE1_P10")
elif json["sex"] == "female":
numelems += 1
sumelems += getFloat(college, "MN_EARN_WNE_MALE0_P10")
if numelems == 0:
numelems = 1
sumelems = getFloat(college, "MN_EARN_WNE_P10")
college["ten"] = int(sumelems / numelems);
return college
def initalize():
with open('app/data.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
finalData.append(processRow(row))
names.append(row["INSTNM"])
def processRow(row):
races = {}
races["White"] = getFloat(row, "UGDS_WHITE")
races["Black"] = getFloat(row, "UGDS_BLACK")
races["Hispanic"] = getFloat(row, "UGDS_HISP")
races["Asian"] = getFloat(row, "UGDS_ASIAN")
races["Am. Indian"] = getFloat(row, "UGDS_AIAN")
races["Hawiian/PI"] = getFloat(row, "UGDS_NHPI")
races["Two or more"] = getFloat(row, "UGDS_2MOR")
races["Non-resident"] = getFloat(row, "UGDS_NRA")
races["Unknown"] = getFloat(row, "UGDS_UNKN")
row["racepercent"] = races
tuition = {}
if row["NPT4_PUB"] != "NULL":
tuition["0"] = row["NPT4_PUB"]
tuition["1"] = row["NPT41_PUB"]
tuition["2"] = row["NPT42_PUB"]
tuition["3"] = row["NPT43_PUB"]
tuition["4"] = row["NPT44_PUB"]
tuition["5"] = row["NPT45_PUB"]
else:
tuition["0"] = row["NPT4_PRIV"]
tuition["1"] = row["NPT41_PRIV"]
tuition["2"] = row["NPT42_PRIV"]
tuition["3"] = row["NPT43_PRIV"]
tuition["4"] = row["NPT44_PRIV"]
tuition["5"] = row["NPT45_PRIV"]
row["price"] = tuition
return row
def makePie(dict):
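    # Render a donut chart of the racial breakdown and return it as a base64-encoded
    # PNG string so the template can embed it inline (e.g. via a data URI).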
plt.figure()
labels = []
percents = []
colors = ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9"]
for k, v in dict.items():
labels.append(k)
percents.append(v)
patches, texts, misc = plt.pie(percents, colors=colors, autopct='%1.1f%%', startangle=90, pctdistance=1.1)
# draw circle
centre_circle = plt.Circle((0, 0), 0.70, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# Equal aspect ratio ensures that pie is drawn as a circle
plt.axis('equal')
plt.tight_layout()
plt.legend(patches, labels, loc="best")
    buf = BytesIO()
plt.savefig(buf)
buf.seek(0)
labels = []
percents = []
return base64.b64encode(buf.getvalue()).decode('utf-8')
def getFloat(college, field):
if college[field] == "NULL" or college[field] == "PrivacySuppressed":
return 0.0
return float(college[field])
def getString(college, field):
if college[field] == "NULL" or college[field] == "PrivacySuppressed":
return "N/A"
return college[field]
def makeScatter(dict1, dict2):
labels = []
avgCost = []
percents = []
colors = ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69", "#fccde5", "#d9d9d9"]
for k, v in dict1.items():
labels.append(k)
percents.append(v)
    patches, texts = plt.pie(percents, colors=colors, startangle=90, frame=False)
    plt.legend(patches, labels, loc="best")
    buffer = BytesIO()
plt.savefig(buffer, format="jpg")
buffer.seek(0)
return base64.b64encode(buffer.read())
initalize()
if __name__ == '__main__':
app.run(debug=False)
else:
application = app
``` |
{
"source": "19emtuck/hg_delivery",
"score": 2
} |
#### File: hg_delivery/tests/__init__.py
```python
import unittest
from datetime import datetime
# ------------------------------------------------------------------------------
class BasicDataIgnition(unittest.TestCase):
def _add_some_user(self):
from ..models import User
# add a user
user = User('test', 'test', '<EMAIL>', datetime.now())
self.session.add(user)
self.session.flush()
return (user,)
def _add_some_projects(self):
from ..models import Project
# add a project
project_a = Project('project1', 'test', 'test', '127.0.0.1',
'/tmp/project_1', None, False, None, False, True)
self.session.add(project_a)
# add a project
project_b = Project('project2', 'test', 'test', '127.0.0.1',
'/tmp/project_2', None, False, None, False, True)
self.session.add(project_b)
self.session.flush()
return (project_a, project_b)
def _add_some_logs(self, count=2):
from ..models import RemoteLog
# add some logs
user = self.users_list[0]
project_a, project_b = self.projects_list[0:2]
for i in range(count):
self.session.add(RemoteLog(project_a.id, user.id,
'127.0.0.1', '/tmp/test', 'ls -al'))
self.session.add(RemoteLog(project_b.id, user.id,
'127.0.0.1', '/tmp/test', 'ls -al'))
self.session.flush()
```
#### File: hg_delivery/tests/test_macro.py
```python
from . import BasicDataIgnition
import transaction
from mock import MagicMock, Mock
from mock_alchemy.mocking import AlchemyMagicMock
from pyramid import testing
from unittest.mock import patch
from sqlalchemy.exc import IntegrityError, OperationalError
class TestMacro(BasicDataIgnition):
def setUp(self):
self.config = testing.setUp(
settings={'sqlalchemy.url': 'sqlite:///:memory:'})
self.config.include('..models')
settings = self.config.get_settings()
import logging
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models.meta import (
Base,
)
self.engine = get_engine(settings)
session_factory = get_session_factory(self.engine)
Base.metadata.create_all(bind=self.engine)
self.session = get_tm_session(session_factory, transaction.manager)
logging.disable(logging.CRITICAL)
# add a user
self.users_list = self._add_some_user()
def test_add_macro(self):
"""
nominal test
"""
from ..views import create_a_macro
from ..models.hgd_model import Macro, MacroRelations
p_1, p_2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
# macro that pushes data from 1 to 2
request.matchdict['id'] = p_1.id
result = create_a_macro(request)
self.assertFalse(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertEqual(result['error'], 'macro label is mandatory')
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'push'
# if parameters are full filled, no error
result = create_a_macro(request)
self.assertTrue(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertIsNone(result['error'])
# check db
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 1)
macro = self.session.query(Macro).first()
self.assertEqual(macro.label, 'My MACRO LABEL')
self.assertEqual(len(macro.relations), 1)
self.assertEqual(macro.relations[0].id, p_1.id)
request.matchdict['id'] = p_2.id
request.params['macro_name'] = 'My MACRO LABEL 2'
request.params['direction_2'] = 'pull'
result = create_a_macro(request)
self.assertTrue(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertIsNone(result['error'])
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 2)
macro = self.session.query(Macro).filter(Macro.id != macro.id).first()
self.assertEqual(macro.label, 'My MACRO LABEL 2')
self.assertEqual(len(macro.relations), 1)
self.assertEqual(macro.relations[0].id, p_2.id)
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 2)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 2)
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = create_a_macro(request)
self.assertFalse(result['result'])
self.assertEqual(result['error'], 'database error')
def test_add_macro_on_unknown_project(self):
"""
nominal test
"""
from ..views import create_a_macro
from ..models.hgd_model import Macro, MacroRelations
request = testing.DummyRequest()
request.dbsession = self.session
# test unknown project source
request.matchdict['id'] = 42
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'push'
result = create_a_macro(request)
self.assertFalse(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertEqual(result['error'], 'source project is unknown')
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 0)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 0)
p1, p2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
# test unknown project targed
request.matchdict['id'] = p1.id
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_42'] = 'push'
result = create_a_macro(request)
self.assertFalse(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertEqual(result['error'], 'aim project is unknown')
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 0)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 0)
# test wrong format for the direction
request.matchdict['id'] = p1.id
request.params['macro_name'] = 'My MACRO LABEL 3'
request.params['direction_2'] = 'wut'
result = create_a_macro(request)
self.assertFalse(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertEqual(result['error'], 'wrong format')
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 0)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 0)
def test_delete_macro(self):
"""
"""
from ..views import create_a_macro, delete_a_macro
from ..models.hgd_model import Macro, MacroRelations
p_1, p_2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = p_1.id
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'push'
# create a macro and delete it
result = create_a_macro(request)
self.assertTrue(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertIsNone(result['error'])
first_macro_id = self.session.query(Macro).first().id
request.matchdict['macro_id'] = first_macro_id
result = delete_a_macro(request)
self.assertTrue(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertIsNone(result['error'])
# check db content
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 0)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 0)
def test_delete_unknown_macro(self):
"""
"""
from ..views import delete_a_macro
from ..models.hgd_model import Macro, MacroRelations
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['macro_id'] = 42
result = delete_a_macro(request)
self.assertFalse(result['result'])
self.assertIsInstance(result['result'], bool)
self.assertEqual(result['error'], 'Unable to delete macro')
nb_relations = self.session.query(MacroRelations).count()
self.assertEqual(nb_relations, 0)
nb_macro = self.session.query(Macro).count()
self.assertEqual(nb_macro, 0)
def test_run_a_macro_no_authorization(self):
"""
"""
from ..views import run_a_macro, create_a_macro
user = self.users_list[0]
p1, p2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = p1.id
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'push'
result = create_a_macro(request)
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = p1.id
request.matchdict['macro_id'] = 1
request.registry = MagicMock()
d_conf = {'hg_delivery.default_login': 'editor'}
request.registry.return_value.settings.return_value = d_conf
request.user = user
result = run_a_macro(request)
self.assertFalse(result['new_branch_stop'])
self.assertFalse(result['new_head_stop'])
self.assertFalse(result['lst_new_branches'])
self.assertIsInstance(result['project_errors'], list)
self.assertEqual(len(result['project_errors']), 1)
self.assertEqual(result['project_errors'][0], 'project2')
self.assertIsInstance(result['buffers'], dict)
self.assertEqual(len(result['buffers']), 1)
self.assertEqual(result['buffers']['project2'],
"user don't have access to project2")
def test_run_a_macro_not_authorized(self):
from ..nodes import PoolSsh
from ..models.hgd_model import Acl
from ..views import run_a_macro, create_a_macro
user = self.users_list[0]
p1, p2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = p1.id
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'push'
result = create_a_macro(request)
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = p1.id
request.matchdict['macro_id'] = 1
# auth
request.dbsession.add(Acl(user.id, p1.id, 'edit'))
request.dbsession.add(Acl(user.id, p2.id, 'edit'))
request.dbsession.flush()
request.registry = MagicMock()
d_conf = {'hg_delivery.default_login': 'editor'}
request.registry.return_value.settings.return_value = d_conf
request.user = user
cls_mmock = MagicMock()
returned_data = {'mock': 'ok'}
cls_mmock.push_to.return_value = returned_data
with patch.object(PoolSsh, 'get_node', return_value=cls_mmock):
result = run_a_macro(request)
self.assertTrue(result['result'])
self.assertFalse(result['new_branch_stop'])
self.assertFalse(result['new_head_stop'])
self.assertEqual(str(result['data']), "{'mock': 'ok'}")
def test_update_a_macro(self):
"""
"""
from ..models.hgd_model import Macro, MacroRelations
from ..views import update_a_macro
p_1, p_2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
# macro that pushes data from 1 to 2
macro = Macro(p_1.id, 'macro test')
self.session.add(macro)
macro_relation = MacroRelations(p_2.id, 'push')
request.dbsession.add(macro_relation)
macro.relations.append(macro_relation)
self.session.flush()
id_macro = macro.id
# missing parameter
result = update_a_macro(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'bad parameter')
# good behavior
request.matchdict['macro_id'] = id_macro
request.params['macro_name'] = 'My MACRO LABEL'
request.params['direction_2'] = 'pull'
result = update_a_macro(request)
self.assertTrue(result['result'])
self.assertIsNone(result['explanation'])
# missing macro description, doesn't change anything
request.matchdict['macro_id'] = id_macro
result = update_a_macro(request)
self.assertTrue(result['result'])
self.assertIsNone(result['explanation'])
# relations didn't change
self.assertEqual(request.dbsession.query(MacroRelations).count(), 1)
request.matchdict['macro_id'] = id_macro
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = update_a_macro(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'database timeout')
request.matchdict['macro_id'] = id_macro
request.dbsession = AlchemyMagicMock()
exception_mock = IntegrityError(None, None, 'integrity error on table')
request.dbsession.add = Mock()
request.dbsession.add.side_effect = exception_mock
request.dbsession.add.raiseError.side_effect = exception_mock
result = update_a_macro(request)
self.assertFalse(result['result'])
lb = 'This macro has already been define ...'
self.assertEqual(result['explanation'], lb)
def test_view_all_macros(self):
"""
"""
from ..models.hgd_model import Acl, Macro, MacroRelations
from ..views import view_all_macros
# a user without Acl can't see macros
# an administrator user can see macros
# a user with Acl can see macros
default_user = self.users_list[0]
p_1, p_2 = self._add_some_projects()
request = testing.DummyRequest()
request.dbsession = self.session
request.user = default_user
# Sets authenticated_userid
self.config.testing_securitypolicy(userid='editor', permissive=True)
request.registry = MagicMock()
d_conf = {'hg_delivery.default_login': 'editor'}
request.registry.return_value.settings.return_value = d_conf
macro = Macro(p_1.id, 'macro test')
self.session.add(macro)
macro_relation = MacroRelations(p_2.id, 'push')
request.dbsession.add(macro_relation)
macro.relations.append(macro_relation)
request.dbsession.add(Acl(default_user.id, p_1.id, 'edit'))
request.dbsession.add(Acl(default_user.id, p_2.id, 'edit'))
request.dbsession.flush()
self.session.flush()
result = view_all_macros(request)
self.assertEqual(len(result['dict_project_to_macros']), 1)
```
#### File: hg_delivery/tests/test_user.py
```python
from pyramid.httpexceptions import HTTPBadRequest
from . import BasicDataIgnition
import transaction
from mock import MagicMock, Mock
from mock_alchemy.mocking import AlchemyMagicMock
from pyramid import testing
from sqlalchemy.exc import OperationalError
from datetime import datetime
# ------------------------------------------------------------------------------
class TestUser(BasicDataIgnition):
def setUp(self):
self.config = testing.setUp(
settings={'sqlalchemy.url': 'sqlite:///:memory:'})
self.config.include('..models')
settings = self.config.get_settings()
import logging
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models.meta import (
Base,
)
self.engine = get_engine(settings)
session_factory = get_session_factory(self.engine)
Base.metadata.create_all(bind=self.engine)
self.session = get_tm_session(session_factory, transaction.manager)
logging.disable(logging.CRITICAL)
self.users_list = self._add_some_user()
def test_add_user(self):
"""
test dummy view
"""
from ..views import add_user
request = testing.DummyRequest()
settings_mock = MagicMock()
request.registry.settings = settings_mock
settings_dict = {'hg_delivery.default_login': 'editor'}
settings_mock.__getitem__.side_effect = settings_dict.__getitem__
request.dbsession = self.session
# macro that pushes data from 1 to 2
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
lb = 'Your user profile should contain a valid name'
self.assertEqual(result.detail, lb)
request.params['name'] = 'testMyName'
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
lb = 'Your user profile should contain a valid email'
self.assertEqual(result.detail, lb)
request.params['email'] = '<EMAIL>'
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
lb = "Your user profile musn't be empty"
self.assertEqual(result.detail, lb)
request.params['pwd'] = '<PASSWORD>'
result = add_user(request)
self.assertIsInstance(result, dict)
self.assertTrue(result['result'])
# insert twice to trigger integrity error
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
lb = 'This user and this email are already'
lb += ' defined (testMyName <EMAIL>) ...'
self.assertEqual(result.detail, lb)
request.params['name'] = 'testMyName2'
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.add = Mock()
request.dbsession.add.side_effect = exception_mock
request.dbsession.add.raiseError.side_effect = exception_mock
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
self.assertEqual(result.detail, 'database timeout')
def test_add_admin_user(self):
"""
test dummy view
"""
from ..views import add_user
from ..models.hgd_model import User
request = testing.DummyRequest()
request.dbsession = self.session
# Sets authenticated_userid
self.config.testing_securitypolicy(userid='editor', permissive=True)
request.user = User('editor', 'editor', 'editor')
# Sets authenticated_userid
self.config.testing_securitypolicy(userid='editor', permissive=True)
settings_mock = MagicMock()
request.registry.settings = settings_mock
settings_dict = {'hg_delivery.default_login': '<EMAIL>'}
settings_mock.__getitem__.side_effect = settings_dict.__getitem__
request.params['name'] = 'editor'
request.params['email'] = '<EMAIL>'
result = add_user(request)
self.assertIsInstance(result, HTTPBadRequest)
lb = 'Your user profile should contain a valid email'
self.assertEqual(result.detail, lb)
def test_manage_users(self):
"""
test dummy view
"""
from ..views import manage_users
from ..models.hgd_model import Acl, User
p1, p2 = self._add_some_projects()
default_user = self.users_list[0]
request = testing.DummyRequest()
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = manage_users(request)
self.assertFalse(result['result'])
self.assertEqual(result['error'], 'database timeout')
request = testing.DummyRequest()
request.dbsession = self.session
request.dbsession.add(Acl(default_user.id, p1.id, 'edit'))
request.dbsession.flush()
result = manage_users(request)
self.assertTrue(result['result'])
self.assertIsNone(result['error'])
self.assertIsInstance(result['lst_users'], list)
self.assertEqual(len(result['lst_users']), 1)
self.assertIsInstance(result['lst_users'][0], User)
self.assertEqual(len(result['known_acls']), 2)
self.assertEqual(result['known_acls'][0], 'edit')
self.assertEqual(result['known_acls'][1], 'read')
def test_delete_user(self):
"""
"""
from ..views import delete_user
from ..models import User
from pyramid.httpexceptions import HTTPFound, HTTPError
from pyramid.httpexceptions import HTTPServerError
# adding routes is necessary for testing ...
self.config.add_route('users', '/users')
# correct behavior
request = testing.DummyRequest()
request.dbsession = self.session
default_user = self.users_list[0]
request.matchdict['id'] = default_user.id
result = delete_user(request)
self.assertIsInstance(result, HTTPFound)
user_count = self.session.query(User).count()
self.assertEqual(user_count, 0)
# no parameter
request = testing.DummyRequest()
request.dbsession = self.session
result = delete_user(request)
self.assertIsInstance(result, HTTPError)
# sql error on query
self._add_some_user()
user = self.session.query(User).first()
request = testing.DummyRequest()
request.matchdict['id'] = user.id
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = delete_user(request)
self.assertIsInstance(result, HTTPServerError)
# sql error on delete
user = self.session.query(User).first()
request = testing.DummyRequest()
request.matchdict['id'] = user.id
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.delete = Mock()
request.dbsession.delete.side_effect = exception_mock
request.dbsession.delete.raiseError.side_effect = exception_mock
result = delete_user(request)
self.assertIsInstance(result, HTTPServerError)
# no id found
# just dissmiss error ?
def test_get_users(self):
"""
"""
from ..views import get_user
from ..models import User
# parameter error
request = testing.DummyRequest()
request.dbsession = self.session
result = get_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['error'], 'Bad parameter')
self.assertIsNone(result['user'])
# standard use case
request = testing.DummyRequest()
request.dbsession = self.session
default_user = self.users_list[0]
request.matchdict['id'] = default_user.id
result = get_user(request)
self.assertTrue(result['result'])
self.assertIsNone(result['error'])
self.assertIsNotNone(result['user'])
self.assertIsInstance(result['user'], User)
self.assertEqual(result['user'].id, default_user.id)
# db error
request.dbsession = AlchemyMagicMock()
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = get_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['error'], 'database timeout')
self.assertIsNone(result['user'])
def test_update_user(self):
""" """
from ..views import update_user
from ..models import User
default_user = self.users_list[0]
# default behavior
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = default_user.id
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
result = update_user(request)
self.assertTrue(result['result'])
lb = 'This user : new_name (<EMAIL>) has been updated ...'
self.assertEqual(result['explanation'], lb)
# integrity error
second_user = User('test', 'test', '<EMAIL>', datetime.now())
self.session.add(second_user)
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = default_user.id
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
result = update_user(request)
self.assertFalse(result['result'])
lb = "You can't update this user, this email is"
lb += " already used (new_name <EMAIL>) ..."
self.assertEqual(result['explanation'], lb)
# bad parameters
request = testing.DummyRequest()
request.dbsession = self.session
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
result = update_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'Bad parameter')
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = default_user.id
request.params['email'] = '<EMAIL>'
result = update_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'Bad parameter')
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = default_user.id
request.params['name'] = 'new_name'
result = update_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'Bad parameter')
# unknown user
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = 42
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
result = update_user(request)
self.assertFalse(result['result'])
lb = 'This user is unknown or has already been deleted'
self.assertEqual(result['explanation'], lb)
# db error with mock
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = 42
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession = self.session
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = update_user(request)
self.assertFalse(result['result'])
self.assertTrue(result['explanation'].count('database timeout'))
# db error standard exception with mock
request = testing.DummyRequest()
request.dbsession = self.session
request.matchdict['id'] = 42
request.params['name'] = 'new_name'
request.params['email'] = '<EMAIL>'
exception_mock = Exception('standard error')
request.dbsession = self.session
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = update_user(request)
self.assertFalse(result['result'])
self.assertEqual(result['explanation'], 'standard error')
exception_mock = OperationalError(None, None, 'database timeout')
request.dbsession = self.session
request.dbsession.query = Mock()
request.dbsession.query.side_effect = exception_mock
request.dbsession.query.raiseError.side_effect = exception_mock
result = update_user(request)
self.assertFalse(result['result'])
self.assertTrue(result['explanation'].count('database timeout'))
``` |
{
"source": "19emtuck/pyramid_oauth2_provider",
"score": 2
} |
#### File: pyramid_oauth2_provider/example/client.py
```python
import sys
import copy
import base64
import logging
import requests
from collections import namedtuple
log = logging.getLogger('example_client')
class Token(namedtuple('Token', 'token_type access_token expires_in '
'refresh_token user_id')):
__slots__ = ()
@classmethod
def fromdict(cls, d):
return cls(
d['token_type'],
d['access_token'],
d['expires_in'],
d['refresh_token'],
d['user_id']
)
class Client(object):
def __init__(self, client_id, client_secret, token_endpoint,
verifySSL=True):
self.client_id = client_id
self.client_secret = client_secret
self.token_endpoint = token_endpoint
self.verifySSL = verifySSL
self.token = None
def _get_client_auth_header(self):
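        # HTTP Basic client authentication (RFC 6749, section 2.3.1): the client id
        # and secret are joined with ':' and base64-encoded.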
return {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % base64.b64encode('%s:%s'
% (self.client_id, self.client_secret)),
}
def login(self, username, password):
data = {
'grant_type': 'password',
'username': username,
'password': password,
}
resp = requests.post(self.token_endpoint, data=data,
headers=self._get_client_auth_header(),
verify=self.verifySSL, )#config=dict(verbose=log.debug))
if not resp.ok:
raise RuntimeError
self.token = Token.fromdict(resp.json())
def refresh_login(self):
data = {
'grant_type': 'refresh_token',
'refresh_token': self.token.refresh_token,
'user_id': self.token.user_id,
}
resp = requests.post(self.token_endpoint, data=data,
headers=self._get_client_auth_header(),
verify=self.verifySSL, )#config=dict(verbose=log.debug))
if not resp.ok:
raise RuntimeError
self.token = Token.fromdict(resp.json())
def _get_token_auth_header(self):
return {
'Authorization': '%s %s' % (self.token.token_type,
base64.b64encode(self.token.access_token))
}
def _handle_request(self, method, uri, data=None, headers=None):
if not headers:
headers = {}
else:
headers = copy.copy(headers)
headers.update(self._get_token_auth_header())
handler = getattr(requests, method)
resp = handler(uri, data=data, headers=headers, verify=self.verifySSL,
)#config=dict(verbose=log.debug))
return resp
def get(self, *args, **kwargs):
return self._handle_request('get', *args, **kwargs)
def post(self, *args, **kwargs):
return self._handle_request('post', *args, **kwargs)
def put(self, *args, **kwargs):
return self._handle_request('put', *args, **kwargs)
def delete(self, *args, **kwargs):
return self._handle_request('delete', *args, **kwargs)
def usage(args):
print >>sys.stderr, ('usage: %s <client_id> <client_secret> <token_uri> '
'<username> <password>' % args[0])
return 1
def main(args):
if len(args) != 6:
return usage(args)
client_id = args[1]
client_secret = args[2]
token_uri = args[3]
username = args[4]
password = args[5]
client = Client(client_id, client_secret, token_uri, verifySSL=False)
client.login(username, password)
client.refresh_login()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
``` |
{
"source": "19FM/HerePy",
"score": 3
} |
#### File: HerePy/herepy/here_api.py
```python
import datetime
import logging
import time
import requests
from requests.adapters import HTTPAdapter
from urllib3 import Retry
logger = logging.getLogger(__name__)
class HEREApi(object):
""" Base class from which all wrappers inherit."""
def __init__(self, api_key: str = None, timeout: int = None, max_requests_per_second: int = 1):
"""Returns a Api instance.
Args:
api_key (str):
API key taken from HERE Developer Portal.
timeout (int):
Timeout limit for requests.
max_requests_per_second (int):
Max number of requests per second allowed by respective HERE API.
Note: For this to work you must use the same instance of HEREApi or its subclass for each request.
"""
self.__set_credentials(api_key)
if timeout:
self._timeout = timeout
else:
self._timeout = 20
self._http_session = None
self.min_request_interval = 1 / max_requests_per_second
# initialize earliest request time so requests can be made immediately
self._earliest_request_time = datetime.datetime.now()
def _instantiate_http_session(self, retry_strategy: Retry):
"""Create HTTP session on calling the API for the first time"""
adapter = HTTPAdapter(max_retries=retry_strategy)
self._http_session = requests.Session()
self._http_session.mount("https://", adapter)
self._http_session.mount("http://", adapter)
def _execute(self, url: str, retry_strategy: Retry):
"""
Execute request using specified retry strategy.
Inspired by https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
"""
if not self._http_session:
self._instantiate_http_session(retry_strategy)
# check rate limit:
if self._earliest_request_time > datetime.datetime.now():
wait_time = (self._earliest_request_time - datetime.datetime.now()).total_seconds()
logger.debug(f"Earliest request time: {self._earliest_request_time}, Now: {datetime.datetime.now()}")
if wait_time > 0:
logger.info(f"Waiting for {wait_time} seconds until min request interval has passed.")
time.sleep(wait_time)
self._earliest_request_time = datetime.datetime.now() + datetime.timedelta(seconds=self.min_request_interval)
return self._http_session.get(url)
def __set_credentials(self, api_key):
"""Setter for credentials.
Args:
api_key (str):
API key taken from HERE Developer Portal.
"""
self._api_key = api_key
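# Usage sketch (illustrative, not part of the original module): wrappers built on
# HEREApi are expected to pass a urllib3 Retry strategy into _execute(), e.g.
#
#   from urllib3 import Retry
#
#   api = HEREApi(api_key="YOUR_API_KEY", max_requests_per_second=2)
#   retry_strategy = Retry(total=3, backoff_factor=0.5,
#                          status_forcelist=[429, 500, 502, 503, 504])
#   resp = api._execute("https://example.hereapi.com/v1/endpoint", retry_strategy)
#
# The URL above is a placeholder; concrete endpoints live in the API subclasses.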
```
#### File: HerePy/herepy/traffic_api.py
```python
import sys
import json
import requests
from herepy.here_api import HEREApi
from herepy.utils import Utils
from herepy.error import HEREError, UnauthorizedError, InvalidRequestError
from herepy.models import (
TrafficIncidentResponse,
TrafficFlowResponse,
TrafficFlowAvailabilityResponse,
)
from herepy.here_enum import (
IncidentsCriticalityStr,
IncidentsCriticalityInt,
FlowProximityAdditionalAttributes,
)
from typing import List, Optional
from enum import Enum
class TrafficApi(HEREApi):
"""A python interface into the HERE Traffic API"""
def __init__(self, api_key: str = None, timeout: int = None):
"""Returns a TrafficApi instance.
Args:
api_key (str):
API key taken from HERE Developer Portal.
timeout (int):
Timeout limit for requests.
"""
super(TrafficApi, self).__init__(api_key, timeout)
self._base_url = "https://traffic.ls.hereapi.com/traffic/6.1/"
def __get(self, url, data):
url = Utils.build_url(url, extra_params=data)
response = requests.get(url, timeout=self._timeout)
json_data = json.loads(response.content.decode("utf8"))
if json_data.get("TRAFFICITEMS") != None:
return TrafficIncidentResponse.new_from_jsondict(
json_data, param_defaults={"TRAFFICITEMS": None}
)
elif json_data.get("RWS") != None:
return TrafficFlowResponse.new_from_jsondict(
json_data, param_defaults={"RWS": None}
)
elif json_data.get("Response") != None:
return TrafficFlowAvailabilityResponse.new_from_jsondict(
json_data, param_defaults={"Response": None}
)
else:
error = self.__get_error_from_response(json_data)
raise error
def __get_error_from_response(self, json_data):
if "error" in json_data:
if json_data["error"] == "Unauthorized":
return UnauthorizedError(json_data["error_description"])
error_type = json_data.get("Type")
error_message = json_data.get(
"Message", "Error occurred on " + sys._getframe(1).f_code.co_name
)
if error_type == "Invalid Request":
return InvalidRequestError(error_message)
else:
return HEREError(error_message)
    def __prepare_str_values(self, enums: List[Enum]):
values = ""
for enum in enums:
values += enum.__str__() + ","
values = values[:-1]
return values
def __prepare_criticality_int_values(
        self, criticality_enums: List[IncidentsCriticalityInt]
):
criticality_values = ""
for criticality in criticality_enums:
criticality_values += str(criticality.__int__()) + ","
criticality_values = criticality_values[:-1]
return criticality_values
def __prepare_corridor_value(self, points: List[List[float]], width: int):
corridor_str = ""
for lat_long_pair in points:
corridor_str += str.format("{0},{1};", lat_long_pair[0], lat_long_pair[1])
corridor_str += str(width)
return corridor_str
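    # For example, points=[[52.516, 13.381], [52.519, 13.405]] with width=100
    # produces the corridor string "52.516,13.381;52.519,13.405;100".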
def incidents_in_bounding_box(
self,
top_left: List[float],
bottom_right: List[float],
        criticality: List[IncidentsCriticalityStr],
) -> Optional[TrafficIncidentResponse]:
"""Request traffic incident information within specified area.
Args:
top_left (List):
List contains latitude and longitude in order.
bottom_right (List):
List contains latitude and longitude in order.
criticality (List):
List of IncidentsCriticalityStr.
Returns:
TrafficIncidentResponse
Raises:
HEREError"""
data = {
"bbox": str.format(
"{0},{1};{2},{3}",
top_left[0],
top_left[1],
bottom_right[0],
bottom_right[1],
),
"apiKey": self._api_key,
"criticality": self.__prepare_str_values(enums=criticality),
}
return self.__get(self._base_url + "incidents.json", data)
def incidents_in_corridor(
self, points: List[List[float]], width: int
) -> Optional[TrafficIncidentResponse]:
"""Request traffic incidents for a defined route.
Args:
points (List):
List contains lists of latitude and longitude pairs in order.
width (int):
Width of corridor.
Returns:
TrafficIncidentResponse
Raises:
HEREError"""
data = {
"corridor": self.__prepare_corridor_value(points=points, width=width),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "incidents.json", data)
def incidents_via_proximity(
self,
latitude: float,
longitude: float,
radius: int,
        criticality: List[IncidentsCriticalityInt],
) -> Optional[TrafficIncidentResponse]:
"""Request traffic incident information within specified area.
Args:
latitude (float):
Latitude of specified area.
longitude (float):
Longitude of specified area.
radius (int):
Radius of area in meters.
criticality (List):
List of IncidentsCriticalityInt.
Returns:
TrafficIncidentResponse
Raises:
HEREError"""
data = {
"prox": str.format("{0},{1},{2}", latitude, longitude, radius),
"criticality": self.__prepare_criticality_int_values(
criticality_enums=criticality
),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "incidents.json", data)
def flow_using_quadkey(self, quadkey: str) -> Optional[TrafficFlowResponse]:
"""Request traffic flow information using a quadkey.
Args:
quadkey (str):
            The quadkey uniquely defines a region of the globe using a standard addressing algorithm.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"quadkey": quadkey,
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_within_boundingbox(
self,
top_left: List[float],
bottom_right: List[float],
) -> Optional[TrafficFlowResponse]:
"""Request traffic flow information within specified area.
Args:
top_left (List):
List contains latitude and longitude in order.
bottom_right (List):
List contains latitude and longitude in order.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"bbox": str.format(
"{0},{1};{2},{3}",
top_left[0],
top_left[1],
bottom_right[0],
bottom_right[1],
),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_using_proximity(
self, latitude: float, longitude: float, distance: int
) -> Optional[TrafficFlowResponse]:
"""Request traffic flow for a circle around a defined point.
Args:
latitude (float):
            Latitude of specified area.
longitude (float):
            Longitude of specified area.
distance (int):
            Radius in metres, extending in all directions from the point.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"prox": str.format(
"{0},{1},{2}",
latitude,
longitude,
distance,
),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_using_proximity_returning_additional_attributes(
self,
latitude: float,
longitude: float,
distance: int,
        attributes: List[FlowProximityAdditionalAttributes],
) -> Optional[TrafficFlowResponse]:
"""Request traffic flow information using proximity, returning shape and functional class.
Args:
latitude (float):
            Latitude of specified area.
longitude (float):
            Longitude of specified area.
distance (int):
            Radius in metres, extending in all directions from the point.
attributes (List):
List that contains FlowProximityAdditionalAttributes.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"prox": str.format(
"{0},{1},{2}",
latitude,
longitude,
distance,
),
"responseattibutes": self.__prepare_str_values(enums=attributes),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_with_minimum_jam_factor(
self, top_left: List[float], bottom_right: List[float], min_jam_factor: int = 7
) -> Optional[TrafficFlowResponse]:
"""Request traffic flow information in specified area with a jam factor.
Args:
top_left (List):
List contains latitude and longitude in order.
bottom_right (List):
List contains latitude and longitude in order.
min_jam_factor (int):
            Minimum jam factor; values greater than 7 indicate severe congestion.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"bbox": str.format(
"{0},{1};{2},{3}",
top_left[0],
top_left[1],
bottom_right[0],
bottom_right[1],
),
"minjamfactor": str.format("{0}", min_jam_factor),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_in_corridor(
self, points: List[List[float]], width: int
) -> Optional[TrafficFlowResponse]:
"""Request traffic flow for a defined route.
Args:
points (List):
List contains lists of latitude and longitude pairs in order.
width (int):
Width of corridor (in meters).
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"corridor": self.__prepare_corridor_value(points=points, width=width),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
def flow_availability_data(self) -> Optional[TrafficFlowAvailabilityResponse]:
"""Flow availability requests allow you to see what traffic flow coverage exists in the current Traffic API.
Returns:
TrafficFlowAvailabilityResponse
Raises:
HEREError"""
data = {
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flowavailability.json", data)
def additional_attributes(
        self, quadkey: str, attributes: List[FlowProximityAdditionalAttributes]
    ) -> Optional[TrafficFlowResponse]:
"""Request traffic flow including shape and functional class information.
Args:
quadkey (str):
            The quadkey uniquely defines a region of the globe using a standard addressing algorithm.
attributes (List):
List that contains FlowProximityAdditionalAttributes.
Returns:
TrafficFlowResponse
Raises:
HEREError"""
data = {
"quadkey": quadkey,
"responseattibutes": self.__prepare_str_values(enums=attributes),
"apiKey": self._api_key,
}
return self.__get(self._base_url + "flow.json", data)
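# Usage sketch (illustrative only): the wrapper is typically driven as
#
#   traffic_api = TrafficApi(api_key="YOUR_API_KEY")
#   response = traffic_api.incidents_in_bounding_box(
#       top_left=[52.5311, 13.3644],
#       bottom_right=[52.5114, 13.4035],
#       criticality=[IncidentsCriticalityStr.minor],  # enum member name is illustrative
#   )
#
# "YOUR_API_KEY" is a placeholder; a HEREError subclass is raised on failure.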
```
#### File: HerePy/tests/test_routing_api.py
```python
import datetime
import os
import sys
import unittest
import responses
import codecs
import herepy
from herepy import (
AvoidArea,
AvoidFeature,
Avoid,
Truck,
ShippedHazardousGood,
TunnelCategory,
TruckType,
)
class RoutingApiTest(unittest.TestCase):
def setUp(self):
api = herepy.RoutingApi("api_key")
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.RoutingApi)
self.assertEqual(self._api._api_key, "api_key")
@responses.activate
def test_bicycleroute_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing_bicycle.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.bicycle_route([41.9798, -87.8801], [41.9043, -87.9216])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_bicycleroute_route_short(self):
with codecs.open(
"testdata/models/routing_bicycle.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.bicycle_route([41.9798, -87.8801], [41.9043, -87.9216])
expected_short_route = (
"Mannheim Rd; <NAME>ont Ave; Cullerton St; N Landen Dr; "
"E Fullerton Ave; N Wolf Rd; W North Ave; N Clinton Ave; "
"E Third St; N Caroline Ave"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_carroute_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.car_route(
[11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest]
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_carroute_route_short(self):
with codecs.open(
"testdata/models/routing_car_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.car_route([38.9, -77.04833], [39.0, -77.1])
expected_short_route = (
"US-29 - K St NW; US-29 - Whitehurst Fwy; "
"I-495 N - Capital Beltway; MD-187 S - Old Georgetown Rd"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_carroute_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.car_route([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_carroute_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.car_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest],
)
@responses.activate
def test_carroute_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.car_route([11.0, 12.0], [22.0, 23.0])
@responses.activate
def test_carroute_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.car_route([11.0, 12.0], [47.013399, -10.171986])
@responses.activate
def test_pedestrianroute_whensucceed(self):
with codecs.open(
"testdata/models/routing_pedestrian.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.pedestrian_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest],
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_pedestrianroute_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing_pedestrian.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.pedestrian_route([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_pedestrianroute_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.pedestrian_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.car, herepy.RouteMode.fastest],
)
@responses.activate
def test_pedestrianroute_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.pedestrian_route([11.0, 12.0], [22.0, 23.0])
@responses.activate
def test_pedestrianroute_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.pedestrian_route([11.0, 12.0], [47.013399, -10.171986])
@responses.activate
def test_pedestrianroute_route_short(self):
with codecs.open(
"testdata/models/routing_pedestrian.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.pedestrian_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest],
)
expected_short_route = (
"Mannheim Rd; W Belmont Ave; Cullerton St; E Fullerton Ave; "
"La Porte Ave; E Palmer Ave; N Railroad Ave; W North Ave; "
"E North Ave; E Third St"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_intermediateroute_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.intermediate_route(
[11.0, 12.0],
[15.0, 16.0],
[22.0, 23.0],
[herepy.RouteMode.car, herepy.RouteMode.fastest],
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_intermediateroute_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.intermediate_route(
[11.0, 12.0], [15.0, 16.0], [22.0, 23.0]
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_intermediateroute_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.intermediate_route(
[11.0, 12.0],
[15.0, 16.0],
[22.0, 23.0],
[herepy.RouteMode.car, herepy.RouteMode.fastest],
)
@responses.activate
def test_intermediateroute_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.intermediate_route([11.0, 12.0], [15.0, 16.0], [22.0, 23.0])
@responses.activate
def test_intermediateroute_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.intermediate_route(
[11.0, 12.0], [47.013399, -10.171986], [22.0, 23.0]
)
@responses.activate
def test_publictransport_whensucceed(self):
with codecs.open(
"testdata/models/routing_public.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.public_transport(
[11.0, 12.0],
[15.0, 16.0],
True,
[herepy.RouteMode.publicTransport, herepy.RouteMode.fastest],
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_publictransport_route_short(self):
with codecs.open(
"testdata/models/routing_public.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.public_transport([11.0, 12.0], [15.0, 16.0], True)
expected_short_route = (
"332 - Palmer/Schiller; 332 - Cargo Rd./Delta Cargo; "
"332 - Palmer/Schiller"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_publictransport_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing_public.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.public_transport([11.0, 12.0], [15.0, 16.0], True)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_publictransport_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.public_transport(
[11.0, 12.0],
[15.0, 16.0],
True,
[herepy.RouteMode.car, herepy.RouteMode.fastest],
)
@responses.activate
def test_publictransport_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.public_transport([11.0, 12.0], [15.0, 16.0], True)
@responses.activate
def test_publictransport_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.public_transport([11.0, 12.0], [47.013399, -10.171986], True)
@responses.activate
def test_publictransporttimetable_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing_public_time_table.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.public_transport_timetable(
[11.0, 12.0], [15.0, 16.0], True
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_publictransporttimetable_route_short(self):
with codecs.open(
"testdata/models/routing_public_time_table.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.public_transport_timetable(
[11.0, 12.0], [15.0, 16.0], True
)
expected_short_route = (
"330 - Archer/Harlem (Terminal); 309 - Elmhurst Metra Station"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_locationnearmotorway_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.location_near_motorway(
[11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest]
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_locationnearmotorway_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.location_near_motorway([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_locationnearmotorway_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.location_near_motorway(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest],
)
@responses.activate
def test_locationnearmotorway_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.location_near_motorway([11.0, 12.0], [22.0, 23.0])
@responses.activate
def test_locationnearmotorway_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.location_near_motorway([11.0, 12.0], [47.013399, -10.171986])
@responses.activate
def test_truckroute_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.truck_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.truck, herepy.RouteMode.fastest],
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_truckroute_route_short(self):
with codecs.open(
"testdata/models/routing_truck_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.truck_route([11.0, 12.0], [22.0, 23.0])
expected_short_route = (
"I-190; I-294 S - Tri-State Tollway; I-290 W - Eisenhower Expy W; "
"IL-64 W - E North Ave; I-290 E - Eisenhower Expy E; I-290"
)
self.assertEqual(response.route_short, expected_short_route)
@responses.activate
def test_truckroute_withdefaultmodes_whensucceed(self):
with codecs.open(
"testdata/models/routing.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
response = self._api.truck_route([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_truckroute_when_error_invalid_input_data_occurred(self):
with open("testdata/models/routing_error_invalid_input_data.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.InvalidInputDataError):
self._api.truck_route(
[11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest],
)
@responses.activate
def test_truckroute_when_error_invalid_credentials_occurred(self):
with open("testdata/models/routing_error_invalid_credentials.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
api = herepy.RoutingApi("wrong_api_key", "wrong_app_code")
with self.assertRaises(herepy.InvalidCredentialsError):
api.truck_route([11.0, 12.0], [22.0, 23.0])
@responses.activate
def test_truckroute_when_error_no_route_found_occurred(self):
with open("testdata/models/routing_error_no_route_found.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.NoRouteFoundError):
self._api.truck_route([11.0, 12.0], [47.013399, -10.171986])
@responses.activate
def test_sync_matrix_whensucceed(self):
with codecs.open(
"testdata/models/routing_matrix.json", mode="r", encoding="utf-8"
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=200,
)
avoid = Avoid(
features=[AvoidFeature.toll_road],
areas=[AvoidArea(north=30, south=45, west=30, east=45)],
)
truck = Truck(
shipped_hazardous_goods=[
ShippedHazardousGood.gas,
ShippedHazardousGood.flammable,
],
gross_weight=750,
weight_per_axle=100,
height=2000,
width=350,
length=10000,
tunnel_category=TunnelCategory.c,
axle_count=5,
truck_type=TruckType.tractor,
trailer_count=5,
)
response = self._api.sync_matrix(
origins=[[9.933231, -84.076831]],
destinations=[[9.934574, -84.065544]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
avoid=avoid,
truck=truck,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
def test_sync_matrix_multiple_starts(self):
with codecs.open(
"testdata/models/routing_matrix_multiple_starts.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=200,
)
response = self._api.sync_matrix(
origins=[[9.933231, -84.076831], [9.934574, -84.065544]],
destinations=[[9.934574, -84.065544]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
def test_sync_matrix_multiple_start_names(self):
with codecs.open(
"testdata/models/routing_matrix_multiple_starts.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=200,
)
with open("testdata/models/geocoder.json", "r") as f:
expectedGeocoderResponse = f.read()
responses.add(
responses.GET,
"https://geocode.search.hereapi.com/v1/geocode",
expectedGeocoderResponse,
status=200,
)
response = self._api.sync_matrix(
origins=["Seattle", "Kentucky"],
destinations=[[9.934574, -84.065544]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
def test_sync_matrix_multiple_destinations(self):
with codecs.open(
"testdata/models/routing_matrix_multiple_destinations.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=200,
)
response = self._api.sync_matrix(
origins=[[9.933231, -84.076831]],
destinations=[[9.934574, -84.065544], [9.612552, -84.62892]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
    def test_sync_matrix_multiple_destination_names(self):
with codecs.open(
"testdata/models/routing_matrix_multiple_destinations.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=200,
)
with open("testdata/models/geocoder.json", "r") as f:
expectedGeocoderResponse = f.read()
responses.add(
responses.GET,
"https://geocode.search.hereapi.com/v1/geocode",
expectedGeocoderResponse,
status=200,
)
response = self._api.sync_matrix(
origins=[[9.933231, -84.076831]],
destinations=["Seattle", "Kentucky"],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
def test_sync_matrix_bad_request(self):
with codecs.open(
"testdata/models/routing_matrix_bad_request.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=400,
)
with self.assertRaises(herepy.HEREError):
self._api.sync_matrix(
origins=[[9.933231, -84.076831]],
destinations=[[9.934574, -84.065544]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
routing_mode=herepy.MatrixRoutingMode.fast,
)
@responses.activate
def test_async_matrix_whensucceed(self):
with open(
"testdata/models/routing_async_matrix_calculation.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.POST,
"https://matrix.router.hereapi.com/v8/matrix",
server_response,
status=202,
)
with open(
"testdata/models/routing_async_matrix_completed.json",
mode="r",
encoding="utf-8",
) as f:
server_response = f.read()
responses.add(
responses.GET,
"https://com.com/status",
server_response,
status=200,
)
avoid = Avoid(
features=[AvoidFeature.toll_road],
areas=[AvoidArea(north=30, south=45, west=30, east=45)],
)
truck = Truck(
shipped_hazardous_goods=[
ShippedHazardousGood.gas,
ShippedHazardousGood.flammable,
],
gross_weight=750,
weight_per_axle=100,
height=2000,
width=350,
length=10000,
tunnel_category=TunnelCategory.c,
axle_count=5,
truck_type=TruckType.tractor,
trailer_count=5,
)
response = self._api.async_matrix(
token="token",
origins=[[9.933231, -84.076831]],
destinations=[[9.934574, -84.065544]],
matrix_type=herepy.MatrixRoutingType.circle,
center=[9.933300, -84.066891],
radius=10000,
avoid=avoid,
truck=truck,
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingMatrixResponse)
@responses.activate
def test_departure_as_datetime(self):
with codecs.open(
"testdata/models/routing_truck_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
date = datetime.datetime(
2013, 7, 4, 17, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 7200))
)
response = self._api.truck_route([11.0, 12.0], [22.0, 23.0], departure=date)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_departure_as_string(self):
with codecs.open(
"testdata/models/routing_truck_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedResponse,
status=200,
)
date = "2013-07-04T17:00:00+02:00"
response = self._api.truck_route([11.0, 12.0], [22.0, 23.0], departure=date)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_location_by_name(self):
with codecs.open(
"testdata/models/routing_truck_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedRoutingResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedRoutingResponse,
status=200,
)
with open("testdata/models/geocoder.json", "r") as f:
expectedGeocoderResponse = f.read()
responses.add(
responses.GET,
"https://geocode.search.hereapi.com/v1/geocode",
expectedGeocoderResponse,
status=200,
)
response = self._api.truck_route(
"200 S Mathilda Sunnyvale CA", "200 S Mathilda Sunnyvale CA"
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_location_by_name_throws_WaypointNotFoundError(self):
with codecs.open(
"testdata/models/routing_truck_route_short.json", mode="r", encoding="utf-8"
) as f:
expectedRoutingResponse = f.read()
responses.add(
responses.GET,
"https://route.ls.hereapi.com/routing/7.2/calculateroute.json",
expectedRoutingResponse,
status=200,
)
with open("testdata/models/geocoder_error.json", "r") as f:
expectedGeocoderResponse = f.read()
responses.add(
responses.GET,
"https://geocode.search.hereapi.com/v1/geocode",
expectedGeocoderResponse,
status=200,
)
with self.assertRaises(herepy.WaypointNotFoundError):
response = self._api.truck_route(
"200 S Mathilda Sunnyvale CA", "200 S Mathilda Sunnyvale CA"
)
@responses.activate
def test_route_v8_success(self):
with codecs.open(
"testdata/models/routing_v8_response.json", mode="r", encoding="utf-8"
) as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://router.hereapi.com/v8/routes",
expectedResponse,
status=200,
)
response = self._api.route_v8(
transport_mode=herepy.RoutingTransportMode.car,
origin=[41.9798, -87.8801],
destination=[41.9043, -87.9216],
via=[[41.9339, -87.9021], [41.9379, -87.9121]],
routing_mode=herepy.RoutingMode.fast,
avoid={"features": ["controlledAccessHighway", "tunnel"]},
exclude={"countries": ["TUR"]},
units=herepy.RoutingMetric.metric,
lang="tr-TR",
return_fields=[herepy.RoutingApiReturnField.polyline],
span_fields=[herepy.RoutingApiSpanField.walkAttributes],
truck={"shippedHazardousGoods": ["explosive", "gas"]},
scooter={"allowHighway": "true"},
)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponseV8)
``` |
{
"source": "19goldi94/PostIt_Segmentation",
"score": 2
} |
#### File: PostIt_Segmentation/postit/postit.py
```python
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io
"""
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python postit.py train --dataset=/path/to/postit/dataset --weights=coco
# Resume training a model that you had trained earlier
python postit.py train --dataset=/path/to/postit/dataset --weights=last
#
python postit.py detect --weights=/path/to/weights/file.h5 --image=<URL or path to file>
python postit.py train --dataset=C:/Users/chris/Downloads/PostIt_Segmentation/images --weights=C:/Users/chris/Downloads/PostIt_Segmentation/mask_rcnn_coco.h5
"""
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
############################################################
# Configurations
############################################################
class PostConfig(Config):
"""Configuration for the training
Derived from the base Config class"""
NAME = "postit"
IMAGES_PER_GPU = 2
NUM_CLASSES = 1 + 1 # Background + postit
STEPS_PER_EPOCH = 100
DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class PostDataset(utils.Dataset):
def load_postit(self, dataset_dir, subset):
# add class
self.add_class("postit", 1, "postit")
# subset has to be train or val
assert subset in ["train", "val"]
#dataset_dir = os.path.abspath("../../Coco")
dataset_dir = os.path.join(dataset_dir, subset)
#dataset_dir = os.path.join(dataset_dir, "train")
# load json file
data_json = json.load(open(os.path.join(dataset_dir, "output.json")))
print(data_json)
# iterates over all images in the json file
for i in data_json:
polygons = [g['geometry'] for g in i['Label']['Post It']]
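            # Each 'geometry' entry is assumed to be a list of point dicts such as
            # [{'x': 10, 'y': 12}, {'x': 40, 'y': 12}, {'x': 40, 'y': 50}]; load_mask()
            # below rasterizes these points with skimage.draw.polygon.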
image_path = i["Labeled Data"]
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"postit",
image_id=i['ID'],
path=image_path,
width=width, height=height,
polygons=polygons
)
def load_mask(self, image_id):
image_info = self.image_info[image_id]
if image_info["source"] != "postit":
return super(self.__class__, self).load_mask(image_id)
##convert x, y coordinates to mask
imginfo = self.image_info[image_id]
mask = np.zeros([imginfo["height"], imginfo["width"], len(imginfo["polygons"])],
dtype=np.uint8)
for i, p in enumerate(imginfo["polygons"]):
x_values = []
y_values = []
for x in p:
x_values.append(x['x'])
y_values.append(x['y'])
rr, cc = skimage.draw.polygon(y_values, x_values)
mask[rr, cc, i] = 1
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
info = self.image_info[image_id]
if info["source"] == "postit":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
# Training Dataset
dataset_train = PostDataset()
dataset_train.load_postit(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = PostDataset()
dataset_val.load_postit(args.dataset, "val")
dataset_val.prepare()
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=5,
layers='heads')
def detectpostit(model, image_path=None):
    # Read the input image and run detection on it (the caller passes the model
    # and the --image path, see the command line handling below)
    image = skimage.io.imread(image_path)
    r = model.detect([image], verbose=1)[0]
    m = r['masks']
    # Collapse the per-instance boolean masks into a single labelled image,
    # then binarize it into a black/white post-it mask
    m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
    m = np.where(m > 5, 255, 0).astype(np.uint8)
    import imageio
    file_name = "postit_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
    imageio.imwrite(file_name, m)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN to detect post its.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'detect'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/postit/dataset/",
help='Directory of the dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Post it image')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "detect":
        assert args.image, "Provide --image to detect post its"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = PostConfig()
else:
class InferenceConfig(PostConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
# Select weights file to load
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
elif args.command == "detect":
detectpostit(model, image_path=args.image)
else:
print("'{}' is not recognized. "
"Use 'train' or 'detect'".format(args.command))
``` |
{
"source": "19hgp-es/iris_processing",
"score": 3
} |
#### File: iris_processing/irisCNN/iris_cnn_vgg16.py
```python
import os
from PIL import Image
from torchsummary import summary
import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn
import torch.optim as optim
from torchvision import datasets, transforms
import argparse
import numpy as np
import sys
class CustomImageDataset(Dataset):
def read_data_set(self):
all_img_files = []
all_labels = []
class_names = os.walk(self.data_set_path).__next__()[1]
class_names.sort()
for index, class_name in enumerate(class_names):
print(index)
label = index
img_dir = os.path.join(self.data_set_path, class_name)
img_files = os.walk(img_dir).__next__()[2]
for img_file in img_files:
img_file = os.path.join(img_dir, img_file)
img = Image.open(img_file)
if img is not None:
all_img_files.append(img_file)
all_labels.append(label)
return all_img_files, all_labels, len(all_img_files), len(class_names)
def __init__(self, data_set_path, transforms=None):
self.data_set_path = data_set_path
self.image_files_path, self.labels, self.length, self.num_classes = self.read_data_set()
self.transforms = transforms
def __getitem__(self, index):
image = Image.open(self.image_files_path[index])
image = image.convert("RGB")
if self.transforms is not None:
image = self.transforms(image)
return {'image': image, 'label': self.labels[index]}
def __len__(self):
return self.length
class CustomConvNet(nn.Module):
def __init__(self, num_classes):
super(CustomConvNet, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2))
self.conv3 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2))
self.conv5 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv6 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv7 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2))
self.conv8 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv9 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv10 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2))
self.conv11 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv12 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1), nn.ReLU())
self.conv13 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2))
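        # With 360x80 inputs the five 2x2 max-pools reduce the feature map to
        # floor(360/32) x floor(80/32) = 11 x 2, which is why the first fully
        # connected layer below expects 11*2*512 flattened features.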
self.fc1 = nn.Sequential(nn.Linear(11*2*512, 4096), nn.ReLU(), nn.Dropout(), nn.BatchNorm1d(4096))
self.fc2 = nn.Sequential(nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(), nn.BatchNorm1d(4096))
self.fc3 = nn.Sequential(nn.Linear(4096, num_classes))
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = self.conv6(out)
out = self.conv7(out)
out = self.conv8(out)
out = self.conv9(out)
out = self.conv10(out)
out = self.conv11(out)
out = self.conv12(out)
out = self.conv13(out)
out = out.view(-1, 11*2*512)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
hyper_param_epoch = 80
hyper_param_batch = 50
hyper_param_learning_rate = 0.001
transforms_train = transforms.Compose([transforms.Resize((360, 80)),
transforms.RandomRotation(10.),
transforms.ToTensor()])
transforms_test = transforms.Compose([transforms.Resize((360, 80)),
transforms.ToTensor()])
train_data_set = CustomImageDataset(data_set_path="/content/gdrive/My Drive/iris/train", transforms=transforms_train)
train_loader = DataLoader(train_data_set, batch_size=hyper_param_batch, shuffle=True)
test_data_set = CustomImageDataset(data_set_path="/content/gdrive/My Drive/iris/test", transforms=transforms_test)
test_loader = DataLoader(test_data_set, batch_size=hyper_param_batch, shuffle=True)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
num_classes = train_data_set.num_classes
custom_model = CustomConvNet(num_classes=num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(custom_model.parameters(), lr=hyper_param_learning_rate)
for e in range(hyper_param_epoch):
for i_batch, item in enumerate(train_loader):
images = item['image'].to(device)
labels = item['label'].to(device)
outputs = custom_model(images)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Epoch [{}/{}], Loss: {:.4f}'.format(e + 1, hyper_param_epoch, loss.item()))
custom_model.eval()
'''
np.set_printoptions(threshold=np.inf)
with open('/content/gdrive/My Drive/iris/some_file.txt', 'w') as f:
for name, param in custom_model.named_parameters():
if param.requires_grad:
print(param.data.shape)
f.write(name + " = ")
f.write("[")
for p in param.data:
f.write(str(p.cpu().numpy()))
f.write(",")
f.write("]")
f.write('\n')
'''
summary(custom_model, (3, 360, 80))
with torch.no_grad():
correct = 0
total = 0
for item in test_loader:
images = item['image'].to(device)
labels = item['label'].to(device)
outputs = custom_model(images)
_, predicted = torch.max(outputs.data, 1)
total += len(labels)
print('predicted : ',predicted, '\nlabels : ',labels)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the {} test images: {} %'.format(total, 100 * correct / total))
```
#### File: iris_processing/normalization/iris_rg_linux_ver.py
```python
import cv2 as cv
import math
import numpy as np
import os
#input_img_path = "C:\\Users\\ANTICODE\\Documents\\Iris\\iris_data\\MMUIrisDatabase\\MMU_Iris_Database"
#output_path = "C:\\Users\\ANTICODE\\Documents\\Iris\\iris_recognition\\images\\MMUIris_norm"
input_img_path = "/home/metaljsw2/iris_processing/CASIA-IrisV4(JPG)/CASIA-Iris-Interval"
output_path = "/home/metaljsw2/CASIA_Iris_interval_norm"
iris_circle = [0, 0, 0]
def bottom_hat_median_blurr(image):
cimg = image
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))
blackhat = cv.morphologyEx(cimg, cv.MORPH_BLACKHAT, kernel)
bottom_hat_filtered = cv.add(blackhat, cimg)
return cv.medianBlur(bottom_hat_filtered, 17)
def adjust_gamma(image, gamma=1.0):
inv_gamma = 1.0 / gamma
table = np.array([((i / 255.0) ** inv_gamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv.LUT(image, table)
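# Worked example: with gamma=10 (inv_gamma=0.1) a mid-grey pixel of 128 maps to
# 255 * (128 / 255) ** 0.1, roughly 238, so everything except the nearly black
# pupil is pushed towards white before the Hough circle detection below.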
def detect_circles(img, canny_param=20, hough_param=20):
filtered = bottom_hat_median_blurr(img)
adjusted = adjust_gamma(filtered, 10)
#case mmu => min_rad = 15, max_rad = 40
circles = cv.HoughCircles(adjusted, cv.HOUGH_GRADIENT, 1, 20,
param1=canny_param,
param2=hough_param,
minRadius=20,
maxRadius=100)
inner_circle = [0, 0, 0]
if circles is not None:
inner_circle = np.uint16(np.around(circles[0][0])).tolist()
circle_frame = cv.circle(img, (inner_circle[0], inner_circle[1]), inner_circle[2], (0,0,0), cv.FILLED)
#case mmu => min_rad = inner_circle[2]+20, max_rad = 100
circles = cv.HoughCircles(adjusted, cv.HOUGH_GRADIENT, 1, 20,
param1=canny_param,
param2=hough_param,
minRadius=inner_circle[2]+20,
maxRadius=140)
outer_circle = [0, 0, 0]
if circles is not None:
for circle in circles[0]:
outer_circle = np.uint16(np.around(circle)).tolist()
if (abs(outer_circle[0] - inner_circle[0]) < 15) and (abs(outer_circle[1] - inner_circle[1]) < 15):
break
else:
#case mmu => int(inner_circle[2] * 2.4)
outer_circle[2] = int(inner_circle[2] * 2.4)
outer_circle[0], outer_circle[1] = inner_circle[0], inner_circle[1]
global iris_circle
iris_circle = outer_circle
return circle_frame
def detect_iris_frame(frame):
global iris_circle
#for casia_dabase
if iris_circle[0] < iris_circle[2]:
iris_circle[2] = iris_circle[0]
if iris_circle[1] < iris_circle[2]:
iris_circle[2] = iris_circle[1]
if frame.shape[0] - iris_circle[1] < iris_circle[2]:
iris_circle[2] = frame.shape[0] - iris_circle[1]
if frame.shape[1] - iris_circle[0] < iris_circle[2]:
iris_circle[2] = frame.shape[1] - iris_circle[0]
#
mask = cv.bitwise_not(
cv.circle(np.zeros((np.size(frame,0),np.size(frame,1),1), np.uint8)
, (iris_circle[0], iris_circle[1]), iris_circle[2], (255,255,255), cv.FILLED))
iris_frame = frame.copy()
iris_frame = cv.subtract(frame, frame, iris_frame, mask, -1)
return iris_frame[(iris_circle[1] - iris_circle[2]):
(iris_circle[1] + iris_circle[2]),
(iris_circle[0] - iris_circle[2]):
(iris_circle[0] + iris_circle[2])]
def getPolar2CartImg(image, rad):
c = (float(np.size(image, 0)/2.0), float(np.size(image, 1)/2.0))
imgRes = cv.warpPolar(image, (rad*3,360), c, np.size(image, 0)/2, cv.WARP_POLAR_LOG)
for valid_width in reversed(range(rad*3)):
blank_cnt = 0
for h in range(360):
if (imgRes[h][valid_width] != 0):
blank_cnt+=1
if(blank_cnt == 0):
imgRes = imgRes[0:360, valid_width:rad*3]
break
imgRes = cv.resize(imgRes, (80, 360), interpolation=cv.INTER_CUBIC)
return (imgRes)
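# The normalized strip is resized to 80x360 (width x height), i.e. 360 rows by 80
# columns, which matches the transforms.Resize((360, 80)) input size used by the
# CNN training script in this repository.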
key = 0
print("start image processing")
for (path, dir, files) in os.walk(input_img_path):
# if not(os.path.isdir(output_path+ path.split("MMU_Iris_Database")[1])):
# os.mkdir(output_path +path.split("MMU_Iris_Database")[1])
if not(os.path.isdir(output_path+ path.split("CASIA-Iris-Interval")[1])):
os.mkdir(output_path +path.split("CASIA-Iris-Interval")[1])
for filename in files:
ext = os.path.splitext(filename)[-1]
if ((ext == '.bmp') or (ext == '.jpg')):
print(filename)
frame = cv.imread(path + "/" + filename, cv.CV_8UC1)
#cv.imshow("input", frame)
circle = detect_circles(frame)
#cv.imshow("iris",circle)
iris = detect_iris_frame(circle)
#cv.imshow("iris",iris)
try:
norm_frame = getPolar2CartImg(iris,iris_circle[2])
except cv.error:
print("cv2 error detected..")
continue
#print(frame.shape)
#cv.imshow("normalized", norm_frame)
cv.imwrite(output_path + path.split("CASIA-Iris-Interval")[1] + "/" + filename, norm_frame)
key = cv.waitKey(1000)
if (key == 27 or key == 1048603):
break
cv.destroyAllWindows()
``` |
{
"source": "19jansve66/sensor.avfallsor",
"score": 2
} |
#### File: custom_components/avfallsor/sensor.py
```python
import logging
from datetime import datetime, timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.const import ATTR_ATTRIBUTION
from . import DOMAIN, garbage_types
from .utils import (
find_address,
find_address_from_lat_lon,
to_dt,
find_next_garbage_pickup,
parse_tomme_kalender,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional("address", default=""): cv.string,
vol.Optional("street_id", default=""): cv.string,
vol.Optional("kommune", default=""): cv.string,
vol.Optional("garbage_types", default=garbage_types): list,
}
)
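# Illustrative YAML configuration matching the schema above (the address is a
# placeholder; either address, street_id together with kommune, or the home
# coordinates configured in Home Assistant must be available):
#
#   sensor:
#     - platform: avfallsor
#       address: "Eksempelveien 1, Kristiansand"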
MIN_TIME_BETWEEN_UPDATES = timedelta(weeks=4)
def check_settings(config, hass):
if not any(config.get(i) for i in ["street_id", "kommune"]):
_LOGGER.info("street_id or kommune was not set config")
else:
return True
if not config.get("address"):
_LOGGER.info("address was not set")
else:
return True
if not hass.config.latitude or not hass.config.longitude:
_LOGGER.info("latitude and longitude is not set in ha settings.")
else:
return True
raise vol.Invalid("Missing settings to setup the sensor.")
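# Editor-added note (illustrative, values hypothetical): any single source is
# enough for check_settings to succeed, e.g. a config with only
# {"street_id": "1234"} or only {"address": "Somegate 1"} returns True, while
# an empty config on a Home Assistant instance without latitude/longitude
# raises vol.Invalid.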
async def async_setup_platform(
hass, config_entry, async_add_devices, discovery_info=None
):
"""Setup sensor platform for the ui"""
config = config_entry
street_id = config.get("street_id")
kommune = config.get("kommune")
address = config.get("address")
check_settings(config, hass)
data = AvfallSorData(
address,
street_id,
kommune,
hass.config.latitude,
hass.config.longitude,
async_get_clientsession(hass),
)
await data.update()
sensors = []
for gb_type in config.get("garbage_types"):
sensor = AvfallSor(data, gb_type)
sensors.append(sensor)
async_add_devices(sensors)
return True
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Setup sensor platform for the ui"""
config = config_entry.data
street_id = config.get("street_id")
kommune = config.get("kommune")
address = config.get("address")
check_settings(config, hass)
data = AvfallSorData(
address,
street_id,
kommune,
hass.config.latitude,
hass.config.longitude,
async_get_clientsession(hass),
)
await data.update()
sensors = []
for gb_type in config.get("garbage_types", garbage_types):
sensor = AvfallSor(data, gb_type)
sensors.append(sensor)
async_add_devices(sensors)
return True
async def async_remove_entry(hass, config_entry):
_LOGGER.info("async_remove_entry avfallsor")
try:
await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
_LOGGER.info("Successfully removed sensor from the avfallsor integration")
except ValueError:
pass
class AvfallSorData:
def __init__(self, address, street_id, kommune, lat, lon, client):
self._address = address
self._street_id = street_id
self._kommune = kommune
self.client = client
self._data = {}
self._last_update = None
self._grbrstr = None
self._lat = lat
self._lon = lon
self._friendly_name = None
async def find_street_id(self):
"""Helper to get get the correct info with the least possible setup
Find the info using different methods where the prios are:
1. streetid and kommune
2. address
3. lat and lon set in ha config when this was setup.
"""
if not len(self._street_id) and not len(self._kommune):
if self._address and self._grbrstr is None:
result = await find_address(self._address, self.client)
if result:
self._grbrstr = result
return
if self._lat and self._lon and self._grbrstr is None:
result = await find_address_from_lat_lon(
self._lat, self._lon, self.client
)
if result:
self._grbrstr = result
return
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def _update(self):
_LOGGER.info("Fetching stuff for AvfallSorData")
await self.find_street_id()
if self._street_id and self._kommune:
url = f"https://avfallsor.no/tommekalender/?id={self._street_id}&kommune={self._kommune}"
elif self._grbrstr:
# This seems to redirect to the url above.
url = f"https://avfallsor.no/tommekalender/?gbnr={self._grbrstr}.&searchString=&mnr=&type=adrSearchBtn&pappPapirPlast=true&glassMetall=true"
resp = await self.client.get(url)
if resp.status == 200:
text = await resp.text()
self._data = parse_tomme_kalender(text)
self._last_update = datetime.now()
async def update(self):
await self._update()
return self._data
class AvfallSor(Entity):
def __init__(self, data, garbage_type):
self.data = data
self._garbage_type = garbage_type
@property
def state(self):
"""Return the state of the sensor."""
nxt = self.next_garbage_pickup
if nxt is not None:
delta = nxt.date() - datetime.today().date()
return delta.days
async def async_update(self):
await self.data.update()
@property
def next_garbage_pickup(self):
"""Get the date of the next picked for that garbage type."""
if self._garbage_type == "paper":
return find_next_garbage_pickup(self.data._data.get("paper"))
elif self._garbage_type == "bio":
return find_next_garbage_pickup(self.data._data.get("bio"))
elif self._garbage_type == "mixed":
return find_next_garbage_pickup(self.data._data.get("rest"))
elif self._garbage_type == "metal":
return find_next_garbage_pickup(self.data._data.get("metal"))
@property
def icon(self) -> str:
"""Shows the correct icon for container."""
# todo fix icons.
if self._garbage_type == "paper":
return "mdi:recycle"
elif self._garbage_type == "bio":
return "mdi:recycle"
elif self._garbage_type == "mixed":
return "mdi:recycle"
elif self._garbage_type == "metal":
return "mdi:recycle"
@property
def unique_id(self) -> str:
"""Return the name of the sensor."""
return f"avfallsor_{self._garbage_type}_{self.data._street_id or self.data._grbrstr}"
@property
def name(self) -> str:
return self.unique_id
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return {
"next garbage pickup": self.next_garbage_pickup,
ATTR_ATTRIBUTION: "avfallsør",
"last update": self.data._last_update,
}
@property
def device_info(self) -> dict:
"""I can't remember why this was needed :D"""
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": DOMAIN,
}
@property
    def unit(self) -> int:
"""Unit"""
return int
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement this sensor expresses itself in."""
return "days"
@property
def friendly_name(self) -> str:
return self._friendly_name
``` |
{
"source": "19katz/matching",
"score": 4
} |
#### File: matching/src/matching_1_to_1.py
```python
import copy # deepcopy constructs a new compound object, recursively, inserts copies into it
import random
import networkx as nx
from hungarian_algorithm import algorithm
from scipy.optimize import linear_sum_assignment
import numpy as np
import math
class Person:
# constructor to initialize the attributes of Person class
def __init__(self, name, location, distances):
self.name = name
self.partner = None
self.location = location
self.distances = distances
# return object representation
def __repr__(self):
if self.partner:
return f'{self.name} ⚭ {self.partner}'
else:
return f'{self.name} (no match)'
def check_not_top_matches(matches):
'''Generate a list of people who do not have their top matches'''
not_top_matches = []
for person in matches.keys():
if matches[person].partner != matches[person].preferences[0]:
not_top_matches.append(person)
return not_top_matches
def check_stability(proposing, accepting, list_of_not_top_matches):
    stable = True
    for i in list_of_not_top_matches:
        more_preferred = proposing[i].preferences[:proposing[i].preferences.index(
            proposing[i].partner)]
        # check whether any more-preferred partner would reciprocate
        for j in more_preferred:
            if accepting[j].accept(proposing[i].name) == False:
                # print the reason why the preferred partner rejects
                print(
                    f'{proposing[i].name} prefers {accepting[j].name} more, but {accepting[j].name} prefers {accepting[j].partner}.')
            else:
                # a blocking pair exists, so the matching is unstable
                print("This matching is NOT stable!")
                stable = False
                break
    if stable:
        print("Therefore, this matching is stable.")
class Rider(Person):
def __init__(self, name, location, distances):
# super() refers to parent class, and inherits methods
super().__init__(name, location, distances)
# prefered person not asked yet
# recursively copy
#self.num_people = random.randint(1, 5)
self.trip_length = random.randint(15, 90)
self.driver_prefs = []
self.preferences = []
self.not_asked = []
def make_preferences(self, drivers):
self.driver_prefs = sorted(drivers, key=lambda x: self.distances[x.location][self.location])
self.preferences = [d.name for d in self.driver_prefs]
self.not_asked = copy.deepcopy(self.preferences)
def ask(self):
# drop the first element which is the next preferred person
return self.not_asked.pop(0)
def accept(self, suitor):
return self.partner is None or(
# check that the suitor is strictly preferred to the existing partner
self.preferences.index(suitor) <
self.preferences.index(self.partner)
)
class Driver(Person):
def __init__(self, name, location, distances):
super().__init__(name, location, distances)
self.preferences = []
#print(self.preferences)
self.not_asked = []
self.rider_prefs = []
#self.capacity = random.randint(1, 5)
# amount earned per minute
self.earning = 0.35
# point at which long trip is no longer worth it
self.distance_limit = random.randint(45, 90)
self.utilities = []
self.cost = 0.25
def make_preferences(self, riders):
for rider in riders:
utility = self.get_utility(rider)
#utility = self.get_simple_utility(rider)
self.utilities.append(utility)
self.preferences = [r.name for r in self.rider_prefs]
zipped_lists = zip(self.utilities, self.preferences)
sorted_pairs = sorted(zipped_lists, reverse=True)
tuples = zip(*sorted_pairs)
self.utilities, self.preferences = [ list(tuple) for tuple in tuples]
#print(self.preferences)
self.not_asked = copy.deepcopy(self.preferences)
def get_simple_utility(self, rider):
to_rider_dist = self.distances[self.location][rider.location]
self.rider_prefs.append(rider)
return to_rider_dist
def get_utility(self, rider):
to_rider_dist = self.distances[self.location][rider.location]
travel_dist = rider.trip_length
long_drive_penalty = 0
current_earnings = self.earning * travel_dist
if travel_dist > self.distance_limit:
current_earnings = self.earning * self.distance_limit
for i in range(self.distance_limit + 1, travel_dist + 1):
current_earnings += self.earning * (1 - (i - self.distance_limit) / self.distance_limit)
self.rider_prefs.append(rider)
return current_earnings - self.cost * to_rider_dist
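    # Editor-added worked example of the utility formula above (hypothetical
    # numbers): with earning=0.35, distance_limit=60 and a 62-minute trip, the
    # per-minute earnings past the limit decay linearly:
    #   0.35*60 + 0.35*(1 - 1/60) + 0.35*(1 - 2/60) = about 21.68
    # and a 4-minute pickup drive at cost 0.25/min subtracts 1.0, giving a
    # utility of about 20.68.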
def accept(self, suitor):
return self.partner is None or(
# check that the suitor is strictly preferred to the existing partner
self.preferences.index(suitor) <
self.preferences.index(self.partner)
)
def run_da(riders, drivers):
"""
Run the deferred acceptance algo and print the match results.
1) Each unengaged man propose to the woman he prefers most
2) Each woman says "maybe" to the suitor she most prefers and "no" to all other suitors
3) Continue while there are still unengaged men
"""
# all riders are unmatched at the beginning
unmatched = list(riders.keys())
all_riders = list(riders.keys())
while unmatched:
# randomly select one of the riders to choose next
next = random.choice(unmatched)
rider = riders[next]
# rider ask his first choice
driver = drivers[rider.ask()]
# if driver accepts rider's proposal
if driver.accept(rider.name):
#print(f'{driver.name} accepts')
# # if driver has a partner
if driver.partner:
# this existing rider partner is now an ex
ex = riders[driver.partner]
# this rider person has no partner now :(
ex.partner = None
# add this rider person back to the list of unmatched
if len(ex.not_asked) > 0:
unmatched.append(ex.name)
unmatched.remove(rider.name)
# log the match
rider.partner = driver.name
driver.partner = rider.name
#else:
#print(f'{driver.name} rejects')
if len(rider.not_asked) == 0 and rider.name in unmatched:
unmatched.remove(rider.name)
def print_pairings(people):
average_utility = 0
for p in people.values():
if p.partner:
average_utility += p.utilities[p.preferences.index(p.partner)]
print(
f'{p.name} is paired with {p.partner} ({p.preferences.index(p.partner) + 1}) with utility {p.utilities[p.preferences.index(p.partner)]}')
else:
print(f'{p.name} is not paired')
return average_utility / len(list(people.keys()))
def print_rider_pairings(people):
max_wait_time = -1
average_wait_time = 0
num_rider_best = 0
for p in people.values():
if p.partner:
pref_ind = p.preferences.index(p.partner)
if pref_ind == 0:
num_rider_best += 1
driver_preferred = p.driver_prefs[pref_ind]
distance = p.distances[driver_preferred.location][p.location]
average_wait_time += distance
if distance > max_wait_time:
max_wait_time = distance
print(
f'{p.name} is paired with {p.partner} ({p.preferences.index(p.partner) + 1}) with a wait time of {distance}')
else:
print(f'{p.name} is not paired')
return average_wait_time / len(list(people.keys())), max_wait_time, num_rider_best / len(list(people.keys()))
def run_all(proposing, accepting):
run_da(proposing, accepting)
print()
average_wait_time, max_wait_time, num_rider_best = print_rider_pairings(proposing)
print()
average_utility = print_pairings(accepting)
print()
check_stability(proposing, accepting, check_not_top_matches(proposing))
print()
check_stability(accepting, proposing, check_not_top_matches(accepting))
return average_wait_time, max_wait_time, average_utility, num_rider_best
def make_people(num_locations, num_riders, num_drivers):
N = num_locations
# "distance" is calculated in terms of time for travel
distances = [[-1 for i in range(N)] for i in range(N)]
for i in range(N):
for j in range(N):
if i == j:
distances[i][j] = 0
elif distances[i][j] == -1:
distances[i][j] = random.randint(1, 15)
distances[j][i] = distances[i][j] + random.randint(int(- 0.25 * distances[i][j]), int(0.25 * distances[i][j]))
riders = []
for i in range(num_riders):
location = random.randint(0, N - 1)
r = Rider('RiderGroup' + str(i), location, distances)
riders.append(r)
drivers = []
for i in range(num_drivers):
location = random.randint(0, N - 1)
d = Driver('Driver' + str(i), location, distances)
drivers.append(d)
rider_dict = {}
print("RIDERS")
for r in riders:
r.make_preferences(drivers)
rider_dict[r.name] = r
print(r.name + " demands trip of length: " + str(r.trip_length) )
print(r.name + " prefers these drivers: " + str(r.preferences))
print("\n")
driver_dict = {}
print("DRIVERS")
for d in drivers:
d.make_preferences(riders)
driver_dict[d.name] = d
print(d.name + " has a limit of " + str(d.distance_limit) + " and a cost of " + str(d.cost) )
print(d.name + " prefers these riders: " + str(d.preferences))
return rider_dict, driver_dict, distances
def hungarian_algorithm(rider_dict, driver_dict, distances):
rider_keys = list(rider_dict.keys())
driver_keys = list(driver_dict.keys())
num_riders = len(rider_keys)
G = np.array([[0 for i in range(num_riders)] for j in range(num_riders)])
for r in range(num_riders):
rider = rider_dict[rider_keys[r]]
for d in range(len(driver_keys)):
driver = driver_dict[driver_keys[d]]
G[r][d] = distances[driver.location][rider.location]
row_ind, col_ind = linear_sum_assignment(G)
max_wait_time = -1
average_wait_time = 0
num_rider_best = 0
for i in range(len(col_ind)):
wait_time = G[i][col_ind[i]]
average_wait_time += wait_time
if wait_time > max_wait_time:
max_wait_time = wait_time
rider_prefs = rider_dict[rider_keys[i]].preferences
driver_name = 'Driver' + str(col_ind[i])
pref_ind = rider_prefs.index(driver_name)
if pref_ind == 0:
num_rider_best += 1
print("RiderGroup" + str(i) + " got matched with Driver" + str(col_ind[i]) + f" ({pref_ind + 1})" + " with a wait time of " + str(G[i][col_ind[i]]))
print()
average_utility = 0
for i in range(len(col_ind)):
driver_prefs = driver_dict[driver_keys[col_ind[i]]].preferences
rider_name = 'RiderGroup' + str(i)
driver_pref_ind = driver_prefs.index(rider_name)
utility = driver_dict[driver_keys[col_ind[i]]].utilities[driver_pref_ind]
average_utility += utility
print("Driver" + str(col_ind[i]) + " got matched with RiderGroup" + str(i) + f" ({driver_pref_ind + 1}) and utility {utility}")
return average_wait_time / len(col_ind), max_wait_time, average_utility / len(col_ind), num_rider_best/len(rider_keys)
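# Editor-added sketch (not part of the original script) of the call used above:
# scipy's linear_sum_assignment returns the row/column assignment with minimum
# total cost, e.g. for a 2x2 wait-time matrix
# >>> linear_sum_assignment(np.array([[4, 1], [2, 3]]))
# (array([0, 1]), array([1, 0]))
# i.e. rider 0 -> driver 1 (wait 1) and rider 1 -> driver 0 (wait 2), total 3.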
def greedy_algorithm(rider_dict, driver_dict, distances):
driver_names = list(driver_dict.keys())
matched = [False] * len(driver_names)
matching = {}
max_wait_time = -1
average_wait_time = 0
num_rider_best = 0
for rider in rider_dict:
best_time = 100
best_driver = None
best_driver_ind = -1
rider_prefs = rider_dict[rider].preferences
for i in range(len(driver_names)):
if matched[i] == True:
continue
cur_driver_name = driver_names[i]
if distances[driver_dict[cur_driver_name].location][rider_dict[rider].location] < best_time:
best_time = distances[driver_dict[cur_driver_name].location][rider_dict[rider].location]
best_driver = cur_driver_name
best_driver_ind = i
matching[best_driver] = rider
if best_time > max_wait_time:
max_wait_time = best_time
average_wait_time += best_time
pref_ind = rider_prefs.index(best_driver)
if pref_ind == 0:
num_rider_best += 1
print(rider + " matched with " + best_driver + " " + f"({pref_ind + 1})")
matched[best_driver_ind] = True
print()
average_utility = 0
for driver in matching:
driver_prefs = driver_dict[driver].preferences
rider = matching[driver]
pref_ind = driver_prefs.index(rider)
utility = driver_dict[driver].utilities[pref_ind]
average_utility += utility
print(driver + " matched with " + rider + " " + f"({pref_ind + 1}) with utility ({utility})")
return average_wait_time / len(driver_names), max_wait_time, average_utility / len(driver_names), num_rider_best / len(driver_names)
if __name__ == '__main__':
#random.seed(2021)
num_locations = [5, 10, 25]
num_people = [3, 10, 20]
stats = []
#for i in range(10):
stat_dict = {}
rider_dict, driver_dict, distances = make_people(20, 7, 7)
print()
print("GREEDY ALGORITHM")
g_average, g_max, g_ut, g_best = greedy_algorithm(rider_dict, driver_dict, distances)
print()
print("HUNGARIAN ALGORITHM")
h_average, h_max, h_ut, h_best = hungarian_algorithm(rider_dict, driver_dict, distances)
#print(rider_dict)
print()
print("DEFERRED ACCEPTANCE")
da_average, da_max, da_ut, da_best = run_all(rider_dict, driver_dict)
stat_dict['g'] = [g_average, g_max, g_ut, g_best]
stat_dict['h'] = [h_average, h_max, h_ut, h_best]
stat_dict['da'] = [da_average, da_max, da_ut, da_best]
stats.append(stat_dict)
'''
overall_max = [0] * 3
overall_avg = [0] * 3
overall_ut = [0] * 3
overall_best = [0] * 3
fraction_max = [0] * 3
for i in range(10):
stat_dict = stats[i]
g_max, g_average, g_ut, g_best = stat_dict['g'][0], stat_dict['g'][1], stat_dict['g'][2], stat_dict['g'][3]
h_max, h_average, h_ut, h_best = stat_dict['h'][0], stat_dict['h'][1], stat_dict['h'][2], stat_dict['h'][3]
da_max, da_average, da_ut, da_best = stat_dict['da'][0], stat_dict['da'][1], stat_dict['da'][2], stat_dict['da'][3]
overall_max[0] += g_max / 10
overall_max[1] += h_max / 10
overall_max[2] += da_max / 10
overall_avg[0] += g_average / 10
overall_avg[1] += h_average / 10
overall_avg[2] += da_average / 10
overall_ut[0] += g_ut / 10
overall_ut[1] += h_ut / 10
overall_ut[2] += da_ut / 10
overall_best[0] += g_best / 10
overall_best[1] += h_best / 10
overall_best[2] += da_best / 10
print(f"Greedy Algorithm maximum ({g_max}) and average wait time ({g_average}); average driver utility: {g_ut}; number best: {g_best}")
print(f"Hungarian Algorithm maximum ({h_max}) and average wait time ({h_average}); average driver utility: {h_ut}; number best: {h_best}")
print(f"DA maximum ({da_max}) and average wait time ({da_average}); average driver utility: {da_ut}; number best: {da_best}")
print()
print(f"Greedy Algorithm OVERALL maximum ({overall_max[0]}) and average wait time ({overall_avg[0]}); average driver utility: {overall_ut[0]}; number best: {overall_best[0]}")
print(f"Hungarian Algorithm OVERALL maximum ({overall_max[1]}) and average wait time ({overall_avg[1]}); average driver utility: {overall_ut[1]}; number best: {overall_best[1]}")
print(f"DA OVERALL maximum ({overall_max[2]}) and average wait time ({overall_avg[2]}); average driver utility: {overall_ut[2]}; number best: {overall_best[2]}")
'''
```
#### File: matching/src/step_3_mini_sheet.py
```python
from scipy.spatial import distance
from collections import Counter
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import random
import copy
# random.seed(42)
np.set_printoptions(suppress=True)
# import itertools
def mini_sheet(matchesFile: str, n_suiteds: int, m_suitors: int) -> np.array:
'''
Returns a schedule in the format of a minisheet for small group meetings.
Each row or column may still have conflicts to be resolved.
'''
matches = pd.read_csv(matchesFile).values
results = np.zeros((n_suiteds, m_suitors))
for suited in range(0, n_suiteds):
suitors = matches[:, suited]
n_meetings_for_venture = len(suitors)
# a venture cannot meet multiple mentors at the same time
# random scheduling is better than 1, 2, ..., n_meetings_for_venture
schedule = list(range(1, n_meetings_for_venture + 1))
random.shuffle(schedule)
results[suited, suitors] = schedule
# replace 0 with nan for google sheet
results[results == 0] = np.nan
return results
def is_valid(arr: np.array) -> np.array:
'''
Check if an 1D array is a valid solution (column wise, or row wise).
This means there the row contains only unique values.
Return a boolean array such that each index is True
if it is a unique value, otherwise False.
Inspired by Google Sheets
=if(OR(countif(D$5:D$20, D5)>1, countif($D5:$BA5, D5)>1), TRUE, FALSE)
'''
vals, counts = np.unique(arr, return_counts=True)
idx_val_repeated = np.where(counts > 1)[0]
vals_repeated = vals[idx_val_repeated]
# no duplicates
if len(vals_repeated) < 1:
return np.tile(True, arr.shape)
else:
bool_array = np.logical_not(
np.any([arr == val for val in vals_repeated], axis=0))
return bool_array
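# Editor-added illustration (not in the original sheet code): duplicates are
# flagged element-wise, so
# >>> is_valid(np.array([1, 2, 2, 3]))
# array([ True, False, False,  True])
# while a fully unique row such as np.array([1, 2, 3]) returns all True.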
def check_valid(arr: np.array) -> np.array:
'''
Ensure resulting schedule is valid.
Return a boolean_array n x m of valid indices, such that each
index is True if the it is unique in the row and col. Otherwise,
the index is False.
'''
check_rows = np.apply_along_axis(is_valid, axis=0, arr=arr) # check_rows
if arr.ndim == 1: # 1D
return check_rows
if arr.ndim == 2: # 2D
check_cols = np.apply_along_axis(is_valid, axis=1, arr=arr)
return np.logical_and(check_cols, check_rows)
def schedule_optimiser(arr: np.array):
'''
Return an optimised schedule with as few errors as possible.
'''
errors = np.where(check_valid(arr[:, :]) == False)
n_errors = len(errors[0])
print(f'There are {n_errors} errors before optimising.')
d_errors = dict(enumerate(zip(errors[0], errors[1])))
if len(d_errors) == 0:
print(
f'There are {n_errors} errors before optimising. Therefore, there is no need to optimise.')
return arr
round = 0
# cap 10 rounds to fix
while len(errors[0]) > 0 and (round < 10):
round += 1
num_errors_current = len(errors[0])
d_errors = dict(enumerate(zip(errors[0], errors[1])))
df_errors = pd.DataFrame(d_errors).T
df_errors.rename(columns={0: 'row', 1: 'column'}, inplace=True)
# start with row with the least number of errors
row_order = df_errors.groupby(
'row').count().sort_values(by='column').index
# len(list(itertools.permutations([1,2,3,4,5])))
# in theory, if all of the timeslots were conflicts, there would be 120 permutations to go through
# therefore, cap it at 120 otherwise.
for row in row_order:
# print('\n')
iteration = 0
# print(f'Fixing row: {row}')
# print('\n')
# bool_arr of non na
not_na = ~np.isnan(arr[row, :])
# note, you must verify ALL the columns of the error row, not just the error columns.
# otherwise, you introduce potentially new errors.
# i.e. is_valid on the row, and its respective columns to ensure solution is valid for all columns
# while 1) there are errors, and 2) if iteration < 20
# continue to shuffle and resolve conflicts
# possible_loc is a list of (row, column) of the meetings
possible_locs = list(
zip([row]*np.sum(not_na), np.where(not_na)[0]))
while not(np.all([np.all(is_valid(arr[:, loc[1]])) for loc in possible_locs])) and (iteration < 120):
num_valid_before = np.sum(
[np.all(is_valid(arr[:, loc[1]])) for loc in possible_locs])
iteration += 1
# iterate on each error for that row
for error in [v for k, v in d_errors.items() if v[0] == row]:
# get timeslots (i.e. non na)
timeslots = arr[row, :][~np.isnan(arr[row, :])]
# randomly shuffle timeslots
np.random.shuffle(timeslots)
arr_ = copy.copy(arr)
arr_[row, :][~np.isnan(arr_[row, :])] = timeslots
num_valid_after = np.sum(
[np.all(is_valid(arr_[:, loc[1]])) for loc in possible_locs])
# reassign only if this condition, so you do not add errors
if num_valid_after >= num_valid_before:
arr[row, :][~np.isnan(arr[row, :])] = timeslots
# update errors
errors = np.where(check_valid(arr[:, :]) == False)
# print(f'iteration: {iteration}')
# print(f'row: {row} done.')
errors_ = errors
n_errors_ = len(errors_[0])
print(f'There are {n_errors_} errors after optimising.')
print('\n')
return arr
def simulate_runs(og_schedule, k_runs):
'''
Return number of runs before a perfect schedule,
after simulating up to random k_runs (no random seed)
Time complexity is poor, k_runs of 10 is recommended.
'''
# for k_run scenarios
is_perfect = False
while k_runs > 0 and not(is_perfect):
print(f'--------------- run #{k_runs} ---------------')
# try schedule optimiser up to 10 times to return a perfect schedule
for i in range(0, 10):
print(f'iteration {i+1}')
optimised_schedule = schedule_optimiser(copy.deepcopy(og_schedule))
is_perfect = np.all(check_valid(optimised_schedule))
if is_perfect:
break
else:
continue
k_runs -= 1
print(f'Number of tries for a perfect schedule: {i+1}')
return i+1
def pair_mentors():
'''
Pair Mentors who are similar to each other
'''
# 0 is identical, 1 is farthest
# distance.jaccard([1,1,1], [2,2,2])
# distance.jaccard([1,1,1], [1,1,0])
# post DAA
# jaccard_similarity
# pair mentors up with no matches with mentors they're similar to
pass
def main(matchesFile, simulate=None):
print(f'simulate data? {simulate}')
n_suiteds = 14
m_suitors = 50
og_schedule = mini_sheet(matchesFile, n_suiteds, m_suitors)
if simulate:
return simulate_runs(og_schedule, 10)
errors = np.where(check_valid(og_schedule[:, :]) == False)
d_errors = dict(enumerate(zip(errors[0], errors[1])))
print(f'old_errors: {d_errors}, count: {len(d_errors)}')
arr = copy.deepcopy(og_schedule)
schedule_optimiser(arr)
new_errors = dict(enumerate(tuple(
zip(np.where(check_valid(arr) == 0)[0], np.where(check_valid(arr) == 0)[1]))))
print(f'new_errors: {new_errors}, count: {len(new_errors)}')
# double check entire schedule
print(f'perfect schedule? {np.all(check_valid(arr))}')
# if np.all(check_valid(arr)):
pd.DataFrame(og_schedule).to_csv(
f'./ss_match_sim/sim_100/schedules/original_schedule_test.csv')
pd.DataFrame(arr).to_csv(
f'./ss_match_sim/sim_100/schedules/optimised_schedule_test.csv')
if __name__ == '__main__':
# main('./ss_match_sim/ss_matches_room_0.csv', True) # test 1
main('./ss_match_sim/sim_100/matches/ss_matches_room_0.csv')
``` |
{
"source": "19Leonidas99/JenniferVirtualAssistant",
"score": 2
} |
#### File: ioclients/terminal/__init__.py
```python
from ioclients import JenniferClientSupportsResponders, JenniferClientSupportsNotification
from lessons.base.responses import JenniferTextResponseSegment, JenniferImageReponseSegment, \
JenniferLinkResponseSegement
class JenniferTerminalClient(JenniferClientSupportsNotification, JenniferClientSupportsResponders):
ALLOWED_RESPONSE_TYPES = [JenniferTextResponseSegment, JenniferLinkResponseSegement]
def __init__(self, brain):
self.prompt = '> '
# Must call JenniferNotificationClientBase.init before JenniferClientBase.init
# because JenniferClientBase.init is blocking
JenniferClientSupportsNotification.__init__(self, brain)
JenniferClientSupportsResponders.__init__(self, brain)
def collect_input(self):
return raw_input(self.prompt)
def give_output(self, response_obj):
response = response_obj.to_text()
print "JENNIFER: {}".format(response)
def regain_control(self):
self.prompt = '> '
def give_up_control(self):
self.prompt = '>>> '
```
#### File: JenniferVirtualAssistant/server/brain.py
```python
import os
import json
import atexit
import nltk
import pkgutil
import Queue
import random
import logging
from pytz import common_timezones, timezone
from apscheduler.schedulers.background import BackgroundScheduler
from nltk.tag.perceptron import PerceptronTagger
from lessons.base.plugin import JenniferResponsePlugin, JenniferNotificationPlugin
from lessons.base.responses import JenniferResponse, JenniferTextResponseSegment
logging.basicConfig()
class JenniferBrain(object):
UNSURE_TEXT = "Sorry, I can't help with that"
MULTIPLE_LESSONS_APPLY = 'Which one of my lessons applies here?'
def __init__(self, allow_network_plugins=False, always_allow_plugins=None):
self._initialize_paths()
# Lessons + Settings
self.allow_network_plugins = allow_network_plugins
self.always_allow_plugins = always_allow_plugins or []
self.responders = []
self.notifiers = []
self.notification_queue = Queue.PriorityQueue()
self._load_profile_and_settings()
# Requires self.database & self.settings
self._load_lessons()
# Just to save time later
self.nltktagger = PerceptronTagger()
self.tagset = None
# Notifications
self.notification_clients = []
self._initialize_background_tasks()
def _initialize_paths(self):
"""Create the paths needed"""
self.base_path = os.path.join(os.path.dirname(__file__), '..')
self.profile_file = os.path.join(self.base_path, 'profile.json')
self.lessons_path = os.path.join(self.base_path, 'lessons')
def _load_lessons(self):
"""
Search the lessons/ package for lessons & store them in sorted order by priority
:return:
"""
pkgs = [n for _, n, _ in pkgutil.iter_modules(['lessons']) if n != 'base']
for name in pkgs:
exec 'import lessons.' + name + '.plugin'
responders = [cls(self).set_profile(self.database['profile']) for cls in JenniferResponsePlugin.__subclasses__() if self._is_lesson_allowed(cls)]
self.notifiers = [cls(self).set_profile(self.database['profile']) for cls in JenniferNotificationPlugin.__subclasses__() if self._is_lesson_allowed(cls)]
for r in (responders + self.notifiers):
r.set_settings(self._get_settings_for_lesson(r))
self.responders = sorted(responders, key=lambda l: l.PRIORITY)
def _is_lesson_allowed(self, lesson_cls):
if lesson_cls in self.always_allow_plugins:
return True
if lesson_cls.REQUIRES_NETWORK and not self.allow_network_plugins:
return False
return True
def _load_profile_and_settings(self):
"""
Load the profile
:return:
"""
try:
with open(self.profile_file, 'r+') as profile_file:
data = json.loads(profile_file.read(), strict=False)
self.database = data
if 'profile' in self.database and 'settings' in self.database:
profile_file.close()
return
except (IOError, ValueError):
self.database = {}
self._init_profile()
self._save_profile_to_file()
def _get_settings_for_lesson(self, lesson, lesson_name=None):
"""
Get the settings dict for the lesson
(Must be called from a lesson)
:return:
"""
if not lesson_name:
lesson_name = unicode(lesson.settings_name)
try:
return self.database['settings'][lesson_name]
except KeyError:
if self._test_if_settings_template_exists(lesson):
print "--------{} SETTINGS--------".format(lesson_name)
self._add_lesson_to_settings_and_write(lesson)
return self._get_settings_for_lesson(lesson)
return {}
def _settings_template_path_for_lesson(self, lesson):
"""Gets a settings_template for a given lesson"""
lesson_settings_name = lesson.settings_name
return os.path.join(self.lessons_path, lesson_settings_name, 'settings_template.json')
def _test_if_settings_template_exists(self, lesson):
"""Returns if a settings_template for a given lesson"""
return os.path.isfile(self._settings_template_path_for_lesson(lesson))
def _add_lesson_to_settings_and_write(self, lesson):
"""Loads a lesson's settings_template, runs an initialization function if available, and copies into DB"""
lesson_settings_name = lesson.settings_name
with open(self._settings_template_path_for_lesson(lesson)) as template:
try:
# Try to load initial template
settings_template_dict = json.loads(template.read(), strict=False)
settings_template_dict = lesson.initialize_settings(settings_template_dict)
# Push to DB & save
self.database['settings'][lesson_settings_name] = settings_template_dict
self._save_profile_to_file()
except ValueError:
exit("{} has an invalid settings_template.json".format(lesson_settings_name))
def _save_profile_to_file(self):
"""Writes to profile.json"""
with open(self.profile_file, "w+") as f:
plain_text = json.dumps(self.database, indent=4, sort_keys=True)
f.write(plain_text)
f.close()
def _init_profile(self):
"""Should be run if profile.json doesn't exist"""
fields = [
('first name', 'firstName'),
('last name', 'lastName'),
]
location_fields = [
('city', 'city', 'New York City'),
('region', 'region', 'NY'),
('country', 'country', 'US'),
('zip', 'zip'),
]
        if 'profile' not in self.database:
            # create the profile dict once, not on every loop iteration,
            # so earlier answers are not overwritten
            self.database.update({'profile': {'location': {}}})
            for field in fields:
                print "What is your {}?".format(field[0])
                self.database['profile'][field[1]] = raw_input("> ")
self.database['profile']['location'] = {}
for field in location_fields:
txt = "What is your {}?".format(field[0])
                if len(field) >= 3:
txt += " example: ({})".format(field[2])
print txt
self.database['profile']['location'][field[1]] = raw_input("> ")
while True:
print "What is your timezone? example: ({})".format(random.choice(common_timezones))
tz = raw_input('> ')
if timezone(tz):
self.database['profile']['location']['timezone'] = tz
break
else:
print "Invalid timezone"
if 'settings' not in self.database:
self.database.update({'settings': {'notifications': {'quiet_hours': []}}})
def _get_profile(self):
"""Get the user's profile"""
return self.database['profile']
def take_input(self, text_input, client):
"""
Search all lessons for lessons that can respond
:param text_input:
:return:
"""
text_input = text_input.lower()
tokens = nltk.word_tokenize(text_input)
tags = nltk.tag._pos_tag(tokens, self.tagset, self.nltktagger)
        # TODO: extract this out to a custom stopwords list
try:
tags.remove(('please', 'NN')) # It's common to say 'please' when asking Jennifer something
except:
pass
# Find the lessons that can answer
respond_to = None
matching_lessons = [lesson for lesson in self.responders if lesson.can_respond(tags=tags,
client=client,
brain=self,
plain_text=text_input)]
# No answer
if len(matching_lessons) == 0:
self.respond_or_unsure(None, tags, client, text_input)
# Only one module can respond
elif len(matching_lessons) == 1:
respond_to = matching_lessons[0]
# Multiple lessons can response
else:
priority_counts = {}
for lesson in matching_lessons:
key = lesson.PRIORITY
priority_counts.setdefault(key, []).append(lesson)
# Now we have something like {999: [TimePlugin(), LowPriorityTimePlugin()], 0: [ImportantTimePlugin()]}
min_priority = min(priority_counts.keys())
if len(priority_counts[min_priority]) == 1:
respond_to = priority_counts[min_priority][0]
else:
client.give_output_string("brain", self.MULTIPLE_LESSONS_APPLY)
for lesson in priority_counts[min_priority]:
if client.confirm("brain", lesson.VERBOSE_NAME + "?"):
# TODO: would be nice to remember this decision.. that's v3.0 though.
respond_to = lesson
break
return self.respond_or_unsure(respond_to, tags, client, text_input)
def respond_or_unsure(self, respond_to, tags, client, text_input):
try:
return respond_to.respond(tags=tags,
client=client,
brain=self,
plain_text=text_input)
except Exception as e:
return JenniferResponse(self, [
JenniferTextResponseSegment(self.UNSURE_TEXT)
])
def _initialize_background_tasks(self):
self.scheduler = BackgroundScheduler(timezone="UTC", daemon=True)
self.scheduler.start()
self.scheduler.add_job(self._collect_notifications_from_notifiers, 'interval', seconds=10)
self.scheduler.add_job(self.push_notifications_to_clients, 'interval', seconds=2)
atexit.register(lambda: self.scheduler.shutdown(wait=False))
def _collect_notifications_from_notifiers(self):
for notification_provider in self.notifiers:
while not notification_provider.queue.empty():
self.notification_queue.put(notification_provider.queue.get())
def register_notification_client(self, client):
self.notification_clients.append(client)
def push_notifications_to_clients(self):
while not self.notification_queue.empty():
notification = self.notification_queue.get()
for client in self.notification_clients:
client.give_output_string("brain", notification[1])
``` |
{
"source": "19noahsss19/school_algorithms",
"score": 5
} |
#### File: src/school_algorithms/physics.py
```python
from ._if_not_valid_raise import _if_not_int_or_float_raise
def power_calc(E, t):
"""Calculates power from energy and time using the formula:
power = energy / time
Parameters
----------
E : int or float
The energy value in the equation.
t : int or float
The time value of the equation.
Returns
-------
Float
E / t
Raises
------
ValueError
If E or t is not an integer or float
Examples
--------
>>> school_algorithms.power_calc(10, 5)
2.0
"""
_if_not_int_or_float_raise(E, t)
return E / t
def energy_calc(p, t):
"""
Calculates energy from power and time using the formula:
energy = power * time
Parameters
----------
p: Int or float
The power value of the equation.
t: Int or float
The time value of the equation.
Returns
-------
Int
p * t
Raises
------
ValueError
If p or t is not an integer or float
Examples
--------
>>> school_algorithms.energy_calc(5, 2)
10
"""
_if_not_int_or_float_raise(p, t)
return p * t
def time_calc(p, E):
"""
Calculates time from power and energy using the formula:
time = energy / power
Parameters
----------
p: int or float
The power value of the equation.
E: int or float
The energy value of the equaton.
Returns
-------
Float
E / p
Raises
------
ValueError
If p or E is not an integer or float
Examples
--------
    >>> school_algorithms.time_calc(2, 10)
5.0
"""
_if_not_int_or_float_raise(p, E)
return E / p
``` |
{
"source": "19po/rtl102.5-playlist",
"score": 3
} |
#### File: 19po/rtl102.5-playlist/rtl1025-playlist.py
```python
import urllib
from xml.dom.minidom import parse
import re
import json
def uni(s):
"""
Decode text.
"""
ascii_char = re.findall(r'\[e\]\[c\](\d+)\[p\]', s)
other_char = re.findall(r'\[[a-z]\]+', s)
# find and replace number to ascii character
for char in ascii_char:
if char in s:
s = s.replace(char , unichr(int(char)))
# find and remove [*]
for char in other_char:
if char in s:
s = s.replace(char , '')
return s
def get_info():
"""
Get information.
"""
# check if VLC is turned on
try:
urllib.urlretrieve('http://127.0.0.1:8080/requests/status.xml', '/tmp/info.xml')
except IOError:
print 'VLC is closed.'
return
# replace html characters with xml
with open('/tmp/info.xml', 'r') as fr, open('/tmp/info2.xml', 'w') as fw:
z = ['<', '>']
x = ['<', '>']
for line in fr.readlines():
for i in range(len(z)):
if z[i] in line:
line = line.replace(z[i], x[i])
fw.write(line)
# open xml file, get information and make json file
with open('/tmp/info2.xml', 'r') as fr, open('rtl1025-playlist.json', 'w') as fw:
dom = parse(fr)
cnodes = dom.childNodes
info_dict = {"program_title":"", "speakers":"", "program_image":"",
"artist_name":"", "song_title":"", "song_cover":""}
try:
info_dict["program_title"] = uni(cnodes[0].\
getElementsByTagName('prg_title')[0].firstChild.data)
info_dict["speakers"] = uni(cnodes[0].\
getElementsByTagName('speakers')[0].firstChild.data)
info_dict["program_image"] = cnodes[0].\
getElementsByTagName('image400')[0].firstChild.data
info_dict["artist_name"] = uni(cnodes[0].\
getElementsByTagName('mus_art_name')[0].firstChild.data)
info_dict["song_title"] = uni(cnodes[0].\
getElementsByTagName('mus_sng_title')[0].firstChild.data)
info_dict["song_cover"] = cnodes[0].\
getElementsByTagName('mus_sng_itunescoverbig')[0].firstChild.data
except (IndexError, AttributeError):
pass
# my_dict as json file
fw.write(json.dumps(info_dict))
# display data
with open('rtl1025-playlist.json', 'r') as fw:
j = json.load(fw)
for k, v in j.iteritems():
print "{:15}{:2}{:1}".format(k, ":", v.encode('utf-8'))
if __name__ == '__main__':
get_info()
```
#### File: 19po/rtl102.5-playlist/SystemTray.py
```python
from PyQt4 import QtGui, QtCore
import sys
from Playlist import Playlist
import vlc
import urllib
__author__ = 'postrowski'
# -*-coding: utf-8-*-
class SystemTray(QtGui.QSystemTrayIcon):
"""
Class System Tray which show app indicator and supports its actions.
"""
def __init__(self, parent):
super(SystemTray, self).__init__(parent)
self.sc = QtGui.QFileDialog()
self.nowPlayingLabel = parent.nowPlayingLabel
self.programLabel = parent.programLabel
self.logoLabel = parent.logoLabel
self.coverWebView = parent.coverWebView
self.programWebView = parent.programWebView
self.tray_menu = parent.tray_menu
self.tray_icon = parent.tray_icon
self.hide_ui = parent.hide_ui
self.show_ui = parent.show_ui
self.central_widget = parent.central_widget
self.timer_show = parent.timer_show
self.setup_menu()
self.playlist = Playlist(self)
self.instance = vlc.Instance() # create a vlc instance
self.player = self.instance.media_player_new() # create a empty vlc media player
stream = 'http://shoutcast.rtl.it:3010/stream/1/'
option = '--extraintf=http' # enable web interface
self.media = self.instance.media_new(stream, option) # create the media
self.player.set_media(self.media)
self.info_0 = None # this variable always before set_meta_data call is None
self.timer_check = QtCore.QTimer()
self.connect(self.timer_check, QtCore.SIGNAL("timeout()"), self.set_meta_data) # polling every second
self.my_dict = {}
def setup_menu(self):
"""
Setup app indicator menu.
:return: None
"""
# menu
self.show_action = QtGui.QAction("Show", self.tray_menu)
self.connect(self.show_action, QtCore.SIGNAL("triggered()"), self.show_all)
self.tray_menu.addAction(self.show_action)
self.play_pause_action = QtGui.QAction("Play", self.tray_menu)
self.connect(self.play_pause_action, QtCore.SIGNAL("triggered()"), self.play_pause)
self.tray_menu.addAction(self.play_pause_action)
self.stop_action = QtGui.QAction("Stop", self.tray_menu)
self.connect(self.stop_action, QtCore.SIGNAL("triggered()"), self.stop)
self.tray_menu.addAction(self.stop_action)
self.stop_action.setVisible(False)
self.save_cover_action = QtGui.QAction("Save album cover", self.tray_menu)
self.connect(self.save_cover_action, QtCore.SIGNAL("triggered()"),
lambda: self.save_picture(self.my_dict["album_cover"],
self.my_dict[u"artist_name"] + " - " + self.my_dict[u"album_title"]))
self.tray_menu.addAction(self.save_cover_action)
self.save_cover_action.setVisible(False)
self.save_image_action = QtGui.QAction("Save program image", self.tray_menu)
self.connect(self.save_image_action, QtCore.SIGNAL("triggered()"),
lambda: self.save_picture(self.my_dict["program_image"],
self.my_dict[u"program_title"] + " - " + self.my_dict[u"speakers"]))
self.tray_menu.addAction(self.save_image_action)
self.save_image_action.setVisible(False)
quit_action = QtGui.QAction("Quit", self.tray_menu)
self.connect(quit_action, QtCore.SIGNAL("triggered()"), self.quit_app)
self.tray_menu.addAction(quit_action)
# system tray icon
self.tray_icon.setIcon(QtGui.QIcon(":/images/icon.png"))
self.tray_icon.setContextMenu(self.tray_menu)
self.tray_icon.show()
def hide_all(self):
"""
Hide UI.
"""
self.hide_ui()
self.central_widget.hide()
def show_all(self):
""""
Show UI for 10 seconds, then hide it.
"""
print "show"
self.show_ui()
self.central_widget.show()
self.timer_show.start(10000) # 10 seconds, display UI time in ms
self.timer_show.timeout.connect(self.hide_all)
def set_meta_data(self):
"""
Set xml meta data and show message. Check if images are available to download.
:return: None
"""
info_1 = self.media.get_meta(vlc.Meta.NowPlaying) # get xml data
if info_1 != self.info_0:
self.info_0 = info_1
# print "now playing: {0}".format(self.info_0)
self.playlist.set_info(self.playlist.xml_to_dict(self.info_0))
self.playlist.show_msg()
self.my_dict = self.playlist.xml_to_dict(self.info_0)
# print "my_dict: ", self.my_dict
if self.player.is_playing():
try:
if self.my_dict["album_cover"]:
self.save_cover_action.setVisible(True)
else:
self.save_cover_action.setVisible(False)
except TypeError: # parse data delay when play button pressed
pass
try:
if self.my_dict["program_image"]:
self.save_image_action.setVisible(True)
else:
self.save_image_action.setVisible(False)
except TypeError: # parse data delay when play button pressed
pass
def play_pause(self):
"""
Play or pause radio stream.
:return: None
"""
if self.player.is_playing():
# print "paused"
self.timer_show.killTimer(10)
self.timer_check.stop()
self.play_pause_action.setText("Paused")
self.player.pause()
self.hide_all()
self.stop_action.setVisible(True)
else:
# print "play"
self.timer_check.start(1000)
self.play_pause_action.setText("Pause")
self.player.play()
self.set_meta_data()
self.playlist.show_msg()
self.stop_action.setVisible(True)
def stop(self):
"""
Stop stream.
:return: None
"""
# print "stop"
self.player.stop()
self.play_pause_action.setText("Play")
self.stop_action.setVisible(False)
self.save_cover_action.setVisible(False)
self.save_image_action.setVisible(False)
self.hide_all()
@staticmethod
def save_picture(url, file_name):
"""
Save album cover and/or program image.
:param url: file url
:param file_name: file name
:return: None
"""
location = QtGui.QFileDialog()
dir_path = QtCore.QDir()
path = dir_path.homePath() + dir_path.separator() + unicode(file_name)
file_path = location.getSaveFileName(location, "Save file as", path)
if location:
urllib.urlretrieve(url, unicode(file_path))
@staticmethod
def quit_app():
"""
Close application.
:return: None
"""
# print "quit"
sys.exit()
``` |
{
"source": "19reborn/ImageNette-Training",
"score": 2
} |
#### File: 19reborn/ImageNette-Training/net.py
```python
import os
import time
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, models ,datasets
from torch.utils.data import Dataset, DataLoader, random_split
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
from PIL import Image
# In[2]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# In[3]:
from mxnet.gluon import data as gdata
train_transform = transforms.Compose([
    # Randomly crop a region covering 8%-100% of the original area, with an
    # aspect ratio between 3/4 and 4/3, then resize it to 224x224
    transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0)),
    # Random horizontal flip
    transforms.RandomHorizontalFlip(),
    # Randomly jitter brightness, contrast and saturation
    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
    transforms.ToTensor(),
    # Normalize with ImageNet channel means and standard deviations
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
    # Resize the shorter side to 256
    transforms.Resize(256),
    # Center-crop to 224x224
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# In[4]:
train_dataset = datasets.ImageFolder(
"./data/imagenette2/train",
train_transform
)
test_dataset =datasets.ImageFolder(
"./data/imagenette2/val",
test_transform
)
# In[5]:
Batch = 64
EPOCH = 200
LR = 0.01
# In[6]:
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=Batch, num_workers=5)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=Batch, num_workers=5)
# In[7]:
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.model = models.resnet101(pretrained=False)
self.model.fc = nn.Linear(2048, 10)
def forward(self, x):
output = self.model(x)
return output
# In[8]:
import ranger
net=Net()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = ranger.Ranger(net.parameters(),lr=LR,eps=1e-6)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer,EPOCH+20)
loss_history=[]
acc_history=[]
tacc_history=[]
tloss_history=[]
lr_list=[]
best_acc=0
best_epoch=0
# In[ ]:
start_time = time.time()
for epoch in range(EPOCH):
epoch_time = time.time()
epoch_loss = 0
correct = 0
total=0
scheduler.step()
lr_list.append(scheduler.get_lr())
print("Epoch {} / {}".format(epoch, EPOCH))
net.train()
for inputs, labels in train_loader:
inputs = inputs.to(device)
labels = labels.to(device)
        outputs = net(inputs)  # forward pass
        loss = criterion(outputs, labels)  # softmax + cross entropy
        optimizer.zero_grad()  # clear accumulated gradients
        loss.backward()  # backpropagation
        optimizer.step()
epoch_loss += loss.item()
outputs = nn.functional.softmax(outputs,dim=1)
_, pred = torch.max(outputs, dim=1)
correct += (pred.cpu() == labels.cpu()).sum().item()
total += labels.shape[0]
acc = correct / total
loss_history.append(epoch_loss/len(labels))
acc_history.append(acc)
    # Compute test-set accuracy and loss
correct = 0
total = 0
test_loss = 0
net.eval()
with torch.no_grad():
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = net(images)
loss = criterion(outputs, labels) # softmax + cross entropy
test_loss += loss.item()
outputs = nn.functional.softmax(outputs,dim=1)
_, pred = torch.max(outputs, dim=1)
correct += (pred.cpu() == labels.cpu()).sum().item()
total += labels.shape[0]
tacc = correct / total
epoch_time2 = time.time()
print("Duration: {:.0f}s, Train Loss: {:.4f}, Train Acc: {:.4f}, Test Acc : {:.4f}, Test Loss: {:.4f}".format(epoch_time2-epoch_time, epoch_loss/len(labels), acc, tacc, test_loss/len(labels)))
if tacc>best_acc:
best_acc = tacc
best_epoch = epoch
        torch.save(net.state_dict(), './model_1.pth')  # save the best model so far
end_time = time.time()
tacc_history.append(tacc)
tloss_history.append(test_loss/len(labels))
print("Total Time:{:.0f}s".format(end_time-start_time))
# In[ ]:
plt.plot(np.arange(1,EPOCH+1),lr_list,'b-',color = 'b')
plt.xlabel("epoch")#横坐标名字
plt.ylabel("Learning Rate")#纵坐标名字
plt.title('Learning Rate Curve')
plt.savefig('./nopre_result/lr_test.png', bbox_inches = 'tight')
plt.close
# In[ ]:
x=np.arange(1,EPOCH+1)
loss_history=np.array(loss_history)
tloss_history=np.array(tloss_history)
plt.plot(x, loss_history, 's-', color='r', label='train_loss')  # 's-': square markers
plt.plot(x, tloss_history, 'o-', color='g', label='test_loss')  # 'o-': circle markers
plt.legend()
plt.xlabel("epoch")  # x-axis label
plt.ylabel("Loss")  # y-axis label
plt.title('Loss Curve')
plt.savefig('./nopre_result/loss_test.png', bbox_inches = 'tight')
plt.close()
# In[ ]:
x=np.arange(1,EPOCH+1)
acc_history=np.array(acc_history)
tacc_history=np.array(tacc_history)
plt.plot(x, acc_history, 's-', color='r', label='train_accuracy')  # 's-': square markers
plt.plot(x, tacc_history, 'o-', color='g', label='test_accuracy')  # 'o-': circle markers
plt.legend()
plt.xlabel("epoch")  # x-axis label
plt.ylabel("Accuracy")  # y-axis label
plt.title('Accuracy Curve')
plt.savefig('./nopre_result/acc_test.png', bbox_inches = 'tight')
plt.close()
# In[ ]:
print('Best Accuracy of the network on the test images: %.4f %%' % (100*best_acc))
print('Best epoch: %d' % (best_epoch))
# In[ ]:
# In[ ]:
``` |
{
"source": "19tony97/Autonomous_vheicle_drivingProject",
"score": 4
} |
#### File: carla/controller/utils.py
```python
import numpy as np
from math import pi
from numpy import linalg as LA
def project_point(vector, point):
"""Given a line vector and a point, projects the point
on the line, resulting to a point that is closest to
the given point.
Args:
vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
point: A 2D point in the form [x, y]
Returns:
closest_point: A 2D point in the form [x, y] that lies on
given vector.
"""
p0 = vector[0]
p1 = vector[1]
v1 = np.subtract(point, p0)
v2 = np.subtract(p1, p0)
distance = np.dot(v1, v2) / np.power(LA.norm(v2), 2)
if distance < 0.0:
closest_point = p0
elif distance > 1.0:
closest_point = p1
else:
closest_point = p0 + distance * v2
return closest_point
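# Editor-added illustration (not part of the original controller code):
# projection clamps to the segment end-points, e.g.
# >>> project_point(np.array([[0.0, 0.0], [10.0, 0.0]]), np.array([3.0, 4.0]))
# array([3., 0.])
# >>> project_point(np.array([[0.0, 0.0], [10.0, 0.0]]), np.array([-2.0, 5.0]))
# array([0., 0.])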
def next_carrot(vector, pose_2d, lookahead_dis):
"""Given a line vector, position and look-ahead distance,
determine the next carrot point.
Args:
vector: A 2D array of points in the form [[x1, y1], [x2, y2]]
pose_2d: A 2D point in the form [x, y]
lookahead_dis: A float distance determining how far ahead
we want to look.
Returns:
carrot: A 2D point in the form [x, y].
"""
p0 = vector[0]
p1 = vector[1]
projected_point = project_point(vector, pose_2d)
# Calculate unit vector of trajectory
vec_diff = np.subtract(p1, p0)
unit_vec = vec_diff / LA.norm(vec_diff)
carrot = projected_point + lookahead_dis * unit_vec
return carrot
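# Editor-added illustration (hypothetical values): with the segment
# [[0, 0], [10, 0]], a pose at (3, 4) and a look-ahead of 2, the pose projects
# to (3, 0) and the carrot is pushed 2 units along the segment direction:
# >>> next_carrot(np.array([[0.0, 0.0], [10.0, 0.0]]), np.array([3.0, 4.0]), 2.0)
# array([5., 0.])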
def calculate_delta(position, carrot, delta_max):
"""Given a 2D position and carrot pose, determine the steering
angle delta.
This angle should be constrained by `delta_max`, determined based
on the model. For instance for a car, this will depend on the properties
of the car (for instance using Ackermann steering geometry you can
calculate the center of the turning circle).
Args:
        position: The current pose in the form [x, y, theta], theta in radians.
        carrot: A 2D point in the form [x, y]
        delta_max: A float giving the maximum allowed steering angle in radians.
Returns:
delta: A float representing the steering angle in unit radians.
"""
theta = position[2]
# Calculate the angle between position and carrot
x = carrot[0] - position[0]
y = carrot[1] - position[1]
angle_of_vec = np.arctan2(y, x)
# Limit delta to pi and -pi
delta = -(theta - angle_of_vec)
delta = np.mod(delta + pi, 2 * pi) - pi
# Limit delta to steering angle max
if delta > delta_max:
delta = delta_max
elif delta < -delta_max:
delta = -delta_max
return delta
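# Editor-added illustration (hypothetical values): for a pose [0, 0, 0] facing
# +x, a carrot at (1, 1) and delta_max = pi/4, the steering angle is
# atan2(1, 1) = pi/4 (about 0.785 rad); a carrot directly behind the vehicle
# wraps to -pi and is then clamped to -delta_max.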
def update_waypoint_trajectory(waypoints, waypoint_counter):
"""Given a list of waypoints, and waypoint_counter, determine
the next set up waypoints.
Args:
waypoints: An array of waypoints in the format [wp1, wp2, ..., wpn]
where each wp is a 2D point in the form [x, y].
waypoint_counter: A counter representing a pointer to the current
waypoint. This should not exceed the total size of waypoint_counter.
Returns:
wp1 : First waypoint of the updated trajectory.
wp2: Second waypoint of the updated trajectory.
update_trajectory: A flag to determine whether we should continue.
"""
update_trajectory = True
if waypoint_counter >= len(waypoints):
print('Ran out of waypoints.')
update_trajectory = False
wp1 = wp2 = None
elif waypoint_counter == len(waypoints) - 1:
# Grab the last waypoint and the initial to get back
# to the starting point
wp1 = waypoints[waypoint_counter]
wp2 = waypoints[0]
else:
wp1 = waypoints[waypoint_counter]
wp2 = waypoints[waypoint_counter + 1]
return wp1, wp2, update_trajectory
def calculate_distance(point1, point2):
"""Given two 2D points, calculate the distance.
Args:
point1: A 2D array in the form [x, y]
point2: A 2D array in the form [x, y]
Returns:
distance: A float representing the distance between
the points.
"""
distance = np.sqrt(np.power((point2[1] - point1[1]), 2) +
np.power((point2[0] - point1[0]), 2))
return distance
```
#### File: 19tony97/Autonomous_vheicle_drivingProject/obstacle_detection.py
```python
import numpy as np
from scipy.spatial.distance import euclidean, cdist
from main import BP_LOOKAHEAD_TIME, BP_LOOKAHEAD_BASE, CIRCLE_OFFSETS, CIRCLE_RADII
L = 2
BP_LOOKAHEAD_BASE = int(BP_LOOKAHEAD_BASE)
def next_position(current_x,current_y,yaw,v, delta,L, BP_LOOKAHEAD_TIME, path_iteration):
x_n = current_x
y_n = current_y
yaw_n = yaw
delta_t = BP_LOOKAHEAD_TIME
for i in range(path_iteration+1):
x_n = x_n + v * np.cos(yaw_n)*delta_t
y_n = y_n + v * np.sin(yaw_n)*delta_t
yaw_n = yaw_n + ((v * np.tan(delta))/ L) * delta_t
return x_n, y_n, yaw_n
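# Editor-added worked example (hypothetical values, delta_t passed explicitly):
# a straight-line forward-Euler step with v=10, yaw=0, delta=0 and delta_t=1
# moves the pose 10 units along x per step, so
#   next_position(0.0, 0.0, 0.0, 10.0, 0.0, 2, 1.0, 0) -> (10.0, 0.0, 0.0)
#   next_position(0.0, 0.0, 0.0, 10.0, 0.0, 2, 1.0, 1) -> (20.0, 0.0, 0.0)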
def circles_for_detection(x,y,yaw,CIRCLE_OFFSETS):
current_x, current_y,yaw = x,y,yaw
# get the orientation of the ego-vehicle with formular: position_x_y + distance_between_centroids*cos(yaw)
x_front = current_x + (CIRCLE_OFFSETS[2]*np.cos(yaw))
y_front = current_y + (CIRCLE_OFFSETS[2]*np.sin(yaw))
x_back = current_x + (CIRCLE_OFFSETS[0]*(np.cos(yaw)))
y_back = current_y + (CIRCLE_OFFSETS[0]*(np.sin(yaw)))
center = [0,0,0]
center[0] = [current_x, current_y]
center[1] = [x_front, y_front]
center[2] = [x_back, y_back]
return center
def check_for_obs(obstacles, ego_state, is_collision=False):
"""
get circles_centers, get obstacle data and check
whether obstacle location is in distance of radius
"""
x, y,yaw,v,delta = ego_state[0], ego_state[1], ego_state[2], ego_state[3], ego_state[4]
for i in range(BP_LOOKAHEAD_BASE):
if is_collision:
break
x_lookahead, y_lookahead, yaw_lookahead = next_position(x,y,yaw,v,delta,L,BP_LOOKAHEAD_TIME, path_iteration = i)
#centers for ego vehicle
centers = circles_for_detection(x_lookahead,y_lookahead, yaw_lookahead, CIRCLE_OFFSETS)
#is_collision = False
for obstacle in obstacles:
center_ob = []
#print(str(obstacle.__class__) == "<class 'carla_server_pb2.Vehicle'>")
if str(obstacle.__class__) == "<class 'carla_server_pb2.Vehicle'>":
x_ob_veh = obstacle.transform.location.x
y_ob_veh = obstacle.transform.location.y
yaw_ob_veh = obstacle.transform.rotation.yaw
v_ob_veh = obstacle.forward_speed
# position of obstacle
xn_ob,yn_ob,yawn_ob = next_position(x_ob_veh,y_ob_veh,yaw_ob_veh,v_ob_veh,delta,L,BP_LOOKAHEAD_TIME, path_iteration=i)
# circle centers of other vehicles
center_ob = circles_for_detection(xn_ob, yn_ob, yawn_ob, CIRCLE_OFFSETS)
else:
x_ob_ped = obstacle.transform.location.x
y_ob_ped = obstacle.transform.location.y
yaw_ob_ped = obstacle.transform.rotation.yaw
v_ob_ped = obstacle.forward_speed
xn_ob_ped, yn_ob_ped, yawn_ob_ped = next_position(x_ob_ped, y_ob_ped, yaw_ob_ped, v_ob_ped,delta,L,BP_LOOKAHEAD_TIME, path_iteration=i)
center_ob = [[xn_ob_ped, yn_ob_ped]]
dists = cdist(centers,center_ob, euclidean)
if np.any(dists <= CIRCLE_RADII[0]):
is_collision = True
#print(dists[np.where([dist <= CIRCLE_RADII[0] for dist in dists])] )
print("detected collision: ", is_collision)
break
return is_collision
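# Editor-added sketch of the distance test above (values hypothetical): with
# ego circle centres [[0, 0], [1, 0]] and one obstacle centre [[0.5, 0]],
# cdist(..., euclidean) yields [[0.5], [0.5]]; if CIRCLE_RADII[0] were 1.5,
# np.any(dists <= 1.5) would be True and a collision would be flagged.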
``` |
{
"source": "19tony97/FlakyTestsProject",
"score": 3
} |
#### File: FlakyTestsProject/preprocessing/feature_extractor.py
```python
from nltk import word_tokenize, download, PorterStemmer
from nltk.corpus import stopwords
from wordsegment import load, segment
download('punkt')
download('wordnet')
download('stopwords')
stop_words = set(stopwords.words('english'))
other_stop_words = [",", "?", "-", "_", ";", "\"", "—", "\\n", "==", "0", "1", "2", "3", "4", "-c", "*", "=", "/",
"@", "$", ";", ":", "(", ")", "<", ">", "{", "}", ".", "''", "'", "``", "get", "set", "test"]
stop_words = stop_words.union(set(other_stop_words))
load()
def read_words(text):
tokens = word_tokenize(text)
words = list()
for token in tokens:
if len(token) > 100:
index = 5
while index < len(token):
words += segment(token[index - 5: index + 100])
index += 100
else:
for word in segment(token):
words.append(word)
stemmer = PorterStemmer()
stems = set()
for w in words:
stems.add(stemmer.stem(w))
filtered_stems = [w for w in stems if w not in stop_words]
return set(filtered_stems)
def extract_features(path):
with open(path, 'r', encoding='utf-8') as file:
data = file.read().replace('\n', '')
test_words = read_words(data)
return list(test_words)
```
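A rough illustration of the pipeline above (segmenting identifiers into words, stemming, stopword filtering). It assumes `nltk` and `wordsegment` are installed and their data has been downloaded; the exact segmentation and stems can vary.
```python
from nltk import PorterStemmer
from wordsegment import load, segment

load()
stemmer = PorterStemmer()

# An identifier is segmented into dictionary words, then each word is stemmed.
words = segment("databaseconnectiontimeout")  # typically ['database', 'connection', 'timeout']
stems = {stemmer.stem(w) for w in words}
print(stems)                                  # e.g. {'databas', 'connect', 'timeout'}
```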
#### File: FlakyTestsProject/preprocessing/filter_non_flaky.py
```python
import csv
import os
from collections import OrderedDict
base_dir = 'C:\\Users\\ikime\\Desktop'
prob_all_outfile = os.path.join(base_dir, 'reruns', 'prob_all.csv')
RUNS = 100
def consolidate_results(project):
project_dir = os.path.join(base_dir, 'reruns', project)
non_flaky_outfile = os.path.join(project_dir, 'nonflaky_list.txt')
oth_outfile = os.path.join(project_dir, 'results_distribution.txt')
prob_outfile = os.path.join(project_dir, 'prob.csv')
results = OrderedDict()
print('Consolidating result for project {}'.format(project))
for i in range(RUNS):
log_file = os.path.join(project_dir, 'run{}.log'.format(i + 1))
log = csv.reader(open(log_file, 'r'), delimiter=',')
for row in log:
            if len(row) == 3:
                classname, name, status = row
            elif len(row) == 4:
                # the test name was split across two fields; rejoin it
                classname, name1, name2, status = row
                name = name1 + name2
            else:
                continue
            tc = classname.strip() + '---' + name.strip()
status = status.strip()
if status not in ['pass', 'fail', 'error', 'skip']:
print('Status not in ["pass", "fail", "error", "skip"]')
if tc not in results:
results[tc] = {}
try:
results[tc][status] += 1
except KeyError:
results[tc][status] = 1
with open(non_flaky_outfile, 'w') as non_flaky_out, open(oth_outfile, 'w') as oth_out, open(prob_outfile, 'w') as prob_out, open(prob_all_outfile, 'a') as prob_all_out:
oth_out.write('TC PASS FAIL ERROR SKIP\n')
prob_out.write('TC, PASS, FAIL, ERROR, SKIP\n')
for tc in results:
classname, name = tc.split('---')
c_pass = results[tc]['pass'] if 'pass' in results[tc] else 0
c_fail = results[tc]['fail'] if 'fail' in results[tc] else 0
c_error = results[tc]['error'] if 'error' in results[tc] else 0
c_skip = results[tc]['skip'] if 'skip' in results[tc] else 0
c_total = c_pass + c_fail + c_error + c_skip
if c_pass / c_total == 1 or c_fail / c_total == 1 or c_error / c_total == 1 or c_skip / c_total == 1:
non_flaky_out.write('{}.{}\n'.format(classname, name))
else:
res = '{} {} {} {}'.format(c_pass, c_fail, c_error, c_skip)
                oth_out.write('{}.{} {}\n'.format(classname, name, res))
res_prob = '{},{},{},{}'.format(c_pass / c_total, c_fail / c_total, c_error / c_total, c_skip / c_total)
prob_out.write('{}.{},{}\n'.format(classname, name, res_prob))
prob_all_out.write('{},{}.{},{}\n'.format(project, classname, name, res_prob))
def main():
all_projects = sorted([
p for p in os.listdir(os.path.join(base_dir, 'reruns'))
if os.path.isdir(os.path.join(base_dir, 'reruns', p))
and p.endswith('-100')
])
skip_list = ['hadoop-100', 'alluxio-100']
if os.path.exists(prob_all_outfile):
os.remove(prob_all_outfile)
with open(prob_all_outfile, 'w') as prob_all_out:
prob_all_out.write('PROJECT, TC, PASS, FAIL, ERROR, SKIP\n')
for project in all_projects:
if project not in skip_list:
consolidate_results(project)
if __name__ == '__main__':
main()
```
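The consolidation above reduces to one rule: a test is written to `nonflaky_list.txt` only if a single status accounts for every one of its reruns; otherwise its per-status probabilities go to `prob.csv`. A small stand-alone sketch of that rule, with made-up counts:
```python
def is_non_flaky(counts):
    """counts maps a status ('pass', 'fail', 'error', 'skip') to how often it occurred."""
    total = sum(counts.values())
    return any(c == total for c in counts.values())

print(is_non_flaky({'pass': 100}))            # True  -> non-flaky
print(is_non_flaky({'pass': 97, 'fail': 3}))  # False -> flaky, probabilities reported
```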
#### File: FlakyTestsProject/preprocessing/same_number_of_tests.py
```python
from statistics import median
from os import listdir, path
from random import choice
from shutil import copyfile
def same_number_of_tests():
loc_flaky = []
# calculating the lines of code for each flaky test sample
for filename in listdir('C:\\Users\\Roberto\\Desktop\\test_cases_samples_flaky'):
with open(path.join('C:\\Users\\Roberto\\Desktop\\test_cases_samples_flaky', filename), 'r', encoding = 'utf8') as f:
lines = f.readlines()
count = 0
for line in lines:
for word in line.split():
if '//' in word or '/*' in word or '*/' in word or '*' in word:
break
else:
count += 1
break
loc_flaky.append(count)
# calculating the median size of lines of code of the flaky test samples
median_flaky = median(loc_flaky)
loc_nonflaky = {}
# calculating the lines of code for each non-flaky test sample
for filename in listdir('C:\\Users\\Roberto\\Desktop\\test_cases_samples_nonflaky'):
with open(path.join('C:\\Users\\Roberto\\Desktop\\test_cases_samples_nonflaky', filename), 'r', encoding='utf8') as f:
lines = f.readlines()
count = 0
for line in lines:
for word in line.split():
if '//' in word or '/*' in word or '*/' in word or '*' in word:
break
else:
count += 1
break
loc_nonflaky[filename] = count
file_counter = 0 # variable to track the number of files chosen as non-flaky test sample
flag = 0 # to select alternately a test of size above the median and a test size below the median
while file_counter < 1402: # repeating the process until we get the same number of files as flaky test samples, i.e. 1402
name, lines_of_code = choice(list(loc_nonflaky.items())) # random non-flaky test sample
if lines_of_code >= median_flaky and flag == 0:
try:
with open(path.join('C:\\Users\\Roberto\\Desktop\\chosen_test_cases_nonflaky', name), 'r', encoding='utf8') as f:
continue
except FileNotFoundError:
src_cases = 'C:\\Users\\Roberto\\Desktop\\test_cases_samples_nonflaky\\' + name
dst_cases = 'C:\\Users\\Roberto\\Desktop\\chosen_test_cases_nonflaky\\' + name
src_tokens = 'C:\\Users\\Roberto\\Desktop\\test_tokens_samples_nonflaky\\' + name
dst_tokens = 'C:\\Users\\Roberto\\Desktop\\chosen_test_tokens_nonflaky\\' + name
copyfile(src_cases, dst_cases)
copyfile(src_tokens, dst_tokens)
file_counter += 1
flag = 1
elif lines_of_code <= median_flaky and flag == 1:
try:
with open(path.join('C:\\Users\\Roberto\\Desktop\\chosen_test_cases_nonflaky', name), 'r', encoding='utf8') as f:
continue
except FileNotFoundError:
src_cases = 'C:\\Users\\Roberto\\Desktop\\test_cases_samples_nonflaky\\' + name
dst_cases = 'C:\\Users\\Roberto\\Desktop\\chosen_test_cases_nonflaky\\' + name
src_tokens = 'C:\\Users\\Roberto\\Desktop\\test_tokens_samples_nonflaky\\' + name
dst_tokens = 'C:\\Users\\Roberto\\Desktop\\chosen_test_tokens_nonflaky\\' + name
copyfile(src_cases, dst_cases)
copyfile(src_tokens, dst_tokens)
file_counter += 1
flag = 0
if __name__ == '__main__':
same_number_of_tests()
```
#### File: FlakyTestsProject/preprocessing/tokenize_test_case.py
```python
import os
import re
import subprocess
def tokenize(path):
process = subprocess.Popen(['java', '-jar', os.path.join(os.getcwd(), 'utils\\vis_ids\\jar\\vis_ids.jar'), path],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
body, stderr = process.communicate()
tokens = body.decode('windows-1252').split('\r\n')
all_tokens = list()
for token in tokens:
all_tokens.append(token.lower())
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', token)
for m in matches:
all_tokens.append(m.group(0).lower())
if '' in all_tokens:
all_tokens.remove('')
return list(all_tokens)
``` |
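The regular expression in `tokenize` splits camelCase identifiers at lower-to-upper and acronym boundaries before lowercasing. A quick stand-alone check of that behaviour on a hypothetical token:
```python
import re

pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
token = "parseHTTPResponse"

parts = [m.group(0).lower() for m in re.finditer(pattern, token)]
print(parts)  # ['parse', 'http', 'response']
```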
{
"source": "19valentin99/pykale",
"score": 2
} |
#### File: examples/multisource_adapt/model.py
```python
from copy import deepcopy
import torch
from kale.embed.image_cnn import ResNet18Feature, SmallCNNFeature
from kale.pipeline.multi_domain_adapter import create_ms_adapt_trainer
from kale.predict.class_domain_nets import ClassNetSmallImage
def get_config(cfg):
"""
Sets the hyper-parameters for the optimizer and experiment using the config file
Args:
cfg: A YACS config object.
"""
config_params = {
"train_params": {
"adapt_lambda": cfg.SOLVER.AD_LAMBDA,
"adapt_lr": cfg.SOLVER.AD_LR,
"lambda_init": cfg.SOLVER.INIT_LAMBDA,
"nb_adapt_epochs": cfg.SOLVER.MAX_EPOCHS,
"nb_init_epochs": cfg.SOLVER.MIN_EPOCHS,
"init_lr": cfg.SOLVER.BASE_LR,
"batch_size": cfg.SOLVER.TRAIN_BATCH_SIZE,
"optimizer": {
"type": cfg.SOLVER.TYPE,
"optim_params": {
"momentum": cfg.SOLVER.MOMENTUM,
"weight_decay": cfg.SOLVER.WEIGHT_DECAY,
"nesterov": cfg.SOLVER.NESTEROV,
},
},
},
"data_params": {
"dataset_group": cfg.DATASET.NAME,
"dataset_name": cfg.DATASET.NAME + "_Target_" + cfg.DATASET.TARGET,
"source": "_".join(cfg.DATASET.SOURCE) if cfg.DATASET.SOURCE is not None else None,
"target": cfg.DATASET.TARGET,
"size_type": cfg.DATASET.SIZE_TYPE,
"weight_type": cfg.DATASET.WEIGHT_TYPE,
},
}
return config_params
# Based on https://github.com/criteo-research/pytorch-ada/blob/master/adalib/ada/utils/experimentation.py
def get_model(cfg, dataset, num_channels):
"""
Builds and returns a model and associated hyper-parameters according to the config object passed.
Args:
cfg: A YACS config object.
dataset: A multidomain dataset consisting of source and target datasets.
num_channels: The number of image channels.
"""
config_params = get_config(cfg)
train_params = config_params["train_params"]
train_params_local = deepcopy(train_params)
# setup feature extractor
if cfg.DATASET.NAME.upper() == "DIGITS":
feature_network = SmallCNNFeature(num_channels)
else:
feature_network = ResNet18Feature(num_channels)
if cfg.DAN.METHOD == "MFSAN":
feature_network = torch.nn.Sequential(*(list(feature_network.children())[:-1]))
method_params = {"n_classes": cfg.DATASET.NUM_CLASSES, "target_domain": cfg.DATASET.TARGET}
model = create_ms_adapt_trainer(
method=cfg.DAN.METHOD,
dataset=dataset,
feature_extractor=feature_network,
task_classifier=ClassNetSmallImage,
**method_params,
**train_params_local,
)
return model, train_params
```
#### File: kale/embed/video_se_i3d.py
```python
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from kale.embed.video_i3d import InceptionI3d
from kale.embed.video_selayer import get_selayer, SELayerC, SELayerT
model_urls = {
"rgb_imagenet": "https://github.com/XianyuanLiu/pytorch-i3d/raw/master/models/rgb_imagenet.pt",
"flow_imagenet": "https://github.com/XianyuanLiu/pytorch-i3d/raw/master/models/flow_imagenet.pt",
"rgb_charades": "https://github.com/XianyuanLiu/pytorch-i3d/raw/master/models/rgb_charades.pt",
"flow_charades": "https://github.com/XianyuanLiu/pytorch-i3d/raw/master/models/flow_charades.pt",
}
class SEInceptionI3DRGB(nn.Module):
"""Add the several SELayers to I3D for RGB input.
Args:
num_channels (int): the channel number of the input.
num_classes (int): the class number of dataset.
attention (string): the name of the SELayer.
(Options: ["SELayerC", "SELayerT", "SELayerCoC", "SELayerMC", "SELayerMAC", "SELayerCT" and "SELayerTC"])
    Returns:
        model (InceptionI3d): I3D model with SELayers inserted.
"""
def __init__(self, num_channels, num_classes, attention):
super(SEInceptionI3DRGB, self).__init__()
model = InceptionI3d(in_channels=num_channels, num_classes=num_classes)
temporal_length = 16
# Add channel-wise SELayer
if attention in ["SELayerC", "SELayerCoC", "SELayerMC", "SELayerMAC"]:
se_layer = get_selayer(attention)
model.Mixed_3b.add_module(attention, se_layer(256))
model.Mixed_3c.add_module(attention, se_layer(480))
model.Mixed_4b.add_module(attention, se_layer(512))
model.Mixed_4c.add_module(attention, se_layer(512))
model.Mixed_4d.add_module(attention, se_layer(512))
model.Mixed_4e.add_module(attention, se_layer(528))
model.Mixed_4f.add_module(attention, se_layer(832))
model.Mixed_5b.add_module(attention, se_layer(832))
model.Mixed_5c.add_module(attention, se_layer(1024))
# Add temporal-wise SELayer
elif attention == "SELayerT":
se_layer = get_selayer(attention)
model.Mixed_3b.add_module(attention, se_layer(temporal_length // 2))
model.Mixed_3c.add_module(attention, se_layer(temporal_length // 2))
model.Mixed_4b.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_4c.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_4d.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_4e.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_4f.add_module(attention, se_layer(temporal_length // 4))
# model.Mixed_5b.add_module(attention, SELayerT(temporal_length//8))
# model.Mixed_5c.add_module(attention, SELayerT(temporal_length//8))
# Add channel-temporal-wise SELayer
elif attention == "SELayerCT":
model.Mixed_3b.add_module(attention + "c", SELayerC(256))
model.Mixed_3c.add_module(attention + "c", SELayerC(480))
model.Mixed_4b.add_module(attention + "c", SELayerC(512))
model.Mixed_4c.add_module(attention + "c", SELayerC(512))
model.Mixed_4d.add_module(attention + "c", SELayerC(512))
model.Mixed_4e.add_module(attention + "c", SELayerC(528))
model.Mixed_4f.add_module(attention + "c", SELayerC(832))
model.Mixed_5b.add_module(attention + "c", SELayerC(832))
model.Mixed_5c.add_module(attention + "c", SELayerC(1024))
model.Mixed_3b.add_module(attention + "t", SELayerT(temporal_length // 2))
model.Mixed_3c.add_module(attention + "t", SELayerT(temporal_length // 2))
model.Mixed_4b.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4c.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4d.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4e.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4f.add_module(attention + "t", SELayerT(temporal_length // 4))
# model.Mixed_5b.add_module(attention + "t", SELayerT(temporal_length // 8))
# model.Mixed_5c.add_module(attention + "t", SELayerT(temporal_length // 8))
# Add temporal-channel-wise SELayer
elif attention == "SELayerTC":
model.Mixed_3b.add_module(attention + "t", SELayerT(temporal_length // 2))
model.Mixed_3c.add_module(attention + "t", SELayerT(temporal_length // 2))
model.Mixed_4b.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4c.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4d.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4e.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4f.add_module(attention + "t", SELayerT(temporal_length // 4))
# model.Mixed_5b.add_module(attention + "t", SELayerT(temporal_length // 8))
# model.Mixed_5c.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_3b.add_module(attention + "c", SELayerC(256))
model.Mixed_3c.add_module(attention + "c", SELayerC(480))
model.Mixed_4b.add_module(attention + "c", SELayerC(512))
model.Mixed_4c.add_module(attention + "c", SELayerC(512))
model.Mixed_4d.add_module(attention + "c", SELayerC(512))
model.Mixed_4e.add_module(attention + "c", SELayerC(528))
model.Mixed_4f.add_module(attention + "c", SELayerC(832))
model.Mixed_5b.add_module(attention + "c", SELayerC(832))
model.Mixed_5c.add_module(attention + "c", SELayerC(1024))
else:
raise ValueError("Wrong MODEL.ATTENTION. Current:{}".format(attention))
self.model = model
def forward(self, x):
return self.model(x)
class SEInceptionI3DFlow(nn.Module):
"""Add the several SELayers to I3D for optical flow input."""
def __init__(self, num_channels, num_classes, attention):
super(SEInceptionI3DFlow, self).__init__()
model = InceptionI3d(in_channels=num_channels, num_classes=num_classes)
temporal_length = 16
# Add channel-wise SELayer
if attention in ["SELayerC", "SELayerCoC", "SELayerMC", "SELayerMAC"]:
se_layer = get_selayer(attention)
model.Mixed_3b.add_module(attention, se_layer(256))
model.Mixed_3c.add_module(attention, se_layer(480))
model.Mixed_4b.add_module(attention, se_layer(512))
model.Mixed_4c.add_module(attention, se_layer(512))
model.Mixed_4d.add_module(attention, se_layer(512))
model.Mixed_4e.add_module(attention, se_layer(528))
model.Mixed_4f.add_module(attention, se_layer(832))
model.Mixed_5b.add_module(attention, se_layer(832))
model.Mixed_5c.add_module(attention, se_layer(1024))
# Add temporal-wise SELayer
elif attention == "SELayerT":
se_layer = get_selayer(attention)
model.Mixed_3b.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_3c.add_module(attention, se_layer(temporal_length // 4))
model.Mixed_4b.add_module(attention, se_layer(temporal_length // 8))
model.Mixed_4c.add_module(attention, se_layer(temporal_length // 8))
model.Mixed_4d.add_module(attention, se_layer(temporal_length // 8))
model.Mixed_4e.add_module(attention, se_layer(temporal_length // 8))
model.Mixed_4f.add_module(attention, se_layer(temporal_length // 8))
# Add channel-temporal-wise SELayer
elif attention == "SELayerCT":
model.Mixed_3b.add_module(attention + "c", SELayerC(256))
model.Mixed_3c.add_module(attention + "c", SELayerC(480))
model.Mixed_4b.add_module(attention + "c", SELayerC(512))
model.Mixed_4c.add_module(attention + "c", SELayerC(512))
model.Mixed_4d.add_module(attention + "c", SELayerC(512))
model.Mixed_4e.add_module(attention + "c", SELayerC(528))
model.Mixed_4f.add_module(attention + "c", SELayerC(832))
model.Mixed_5b.add_module(attention + "c", SELayerC(832))
model.Mixed_5c.add_module(attention + "c", SELayerC(1024))
model.Mixed_3b.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_3c.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4b.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4c.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4d.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4e.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4f.add_module(attention + "t", SELayerT(temporal_length // 8))
# Add temporal-channel-wise SELayer
elif attention == "SELayerTC":
model.Mixed_3b.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_3c.add_module(attention + "t", SELayerT(temporal_length // 4))
model.Mixed_4b.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4c.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4d.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4e.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_4f.add_module(attention + "t", SELayerT(temporal_length // 8))
model.Mixed_3b.add_module(attention + "c", SELayerC(256))
model.Mixed_3c.add_module(attention + "c", SELayerC(480))
model.Mixed_4b.add_module(attention + "c", SELayerC(512))
model.Mixed_4c.add_module(attention + "c", SELayerC(512))
model.Mixed_4d.add_module(attention + "c", SELayerC(512))
model.Mixed_4e.add_module(attention + "c", SELayerC(528))
model.Mixed_4f.add_module(attention + "c", SELayerC(832))
model.Mixed_5b.add_module(attention + "c", SELayerC(832))
model.Mixed_5c.add_module(attention + "c", SELayerC(1024))
else:
raise ValueError("Wrong MODEL.ATTENTION. Current:{}".format(attention))
self.model = model
def forward(self, x):
return self.model(x)
def se_inception_i3d(name, num_channels, num_classes, attention, pretrained=False, progress=True, rgb=True):
"""Get InceptionI3d module w/o SELayer and pretrained model."""
if rgb:
model = SEInceptionI3DRGB(num_channels, num_classes, attention)
else:
model = SEInceptionI3DFlow(num_channels, num_classes, attention)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[name], progress=progress)
        # Delete the last layer's parameters and load only the earlier ones, since the class number differs.
        # Uncomment (and change the output size of I3D) when using the default classifier in I3D.
# state_dict.pop("logits.conv3d.weight")
# state_dict.pop("logits.conv3d.bias")
# model.load_state_dict(state_dict, strict=False)
# Create new OrderedDict that add `model.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = "model.{}".format(k)
new_state_dict[name] = v
# Load params except SELayer
model.load_state_dict(new_state_dict, strict=False)
return model
def se_i3d_joint(rgb_pt, flow_pt, num_classes, attention, pretrained=False, progress=True):
"""Get I3D models with SELayers for different inputs.
Args:
rgb_pt (string, optional): the name of pre-trained model for RGB input.
flow_pt (string, optional): the name of pre-trained model for optical flow input.
num_classes (int): the class number of dataset.
attention (string, optional): the name of the SELayer.
pretrained (bool): choose if pretrained parameters are used. (Default: False)
progress (bool, optional): whether or not to display a progress bar to stderr. (Default: True)
Returns:
models (dictionary): A dictionary contains models for RGB and optical flow.
"""
i3d_rgb = i3d_flow = None
if rgb_pt is not None:
i3d_rgb = se_inception_i3d(
name=rgb_pt,
num_channels=3,
num_classes=num_classes,
attention=attention,
pretrained=pretrained,
progress=progress,
rgb=True,
)
if flow_pt is not None:
i3d_flow = se_inception_i3d(
name=flow_pt,
num_channels=2,
num_classes=num_classes,
attention=attention,
pretrained=pretrained,
progress=progress,
rgb=False,
)
models = {"rgb": i3d_rgb, "flow": i3d_flow}
return models
``` |
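A usage sketch for the joint constructor above. It assumes the pykale package is installed so that `kale.embed.video_se_i3d` is importable; the class count and attention choice are illustrative only.
```python
from kale.embed.video_se_i3d import se_i3d_joint

models = se_i3d_joint(
    rgb_pt="rgb_imagenet",    # checkpoint key for the RGB stream (see model_urls)
    flow_pt="flow_imagenet",  # checkpoint key for the optical-flow stream
    num_classes=8,            # hypothetical number of action classes
    attention="SELayerC",     # channel-wise squeeze-and-excitation
    pretrained=True,
)
i3d_rgb, i3d_flow = models["rgb"], models["flow"]
```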
{
"source": "19wintersp/JumpCutterGUI",
"score": 3
} |
#### File: JumpCutterGUI/src/app.py
```python
import jumpcutter
import os
import tkinter as tk
import tkinter.filedialog as tk_filedialog
import tkinter.messagebox as tk_msgbox
class JcApp:
def __init__(self):
self.root = tk.Tk()
self.root.title("JumpCutter")
self.root.resizable(False, False)
self.root.config(pady=0, padx=0)
self.rendering = tk.StringVar()
self.input_type = tk.IntVar()
self.input_type.set(0)
self.input_file = tk.StringVar()
self.input_filename = tk.StringVar()
self.youtube_url = tk.StringVar()
self.jumpcut_silence = tk.IntVar()
self.silent_speed = tk.StringVar()
self.sounded_speed = tk.StringVar()
self.silent_thresh = tk.StringVar()
self.frame_margin = tk.StringVar()
self.sample_rate = tk.StringVar()
self.frame_rate = tk.StringVar()
self.frame_quality = tk.StringVar()
self.jumpcut_silence.set(0)
self.silent_speed.set("5")
self.sounded_speed.set("1")
self.reset_params(True)
input_section = tk.Frame(self.root, padx=8, pady=8)
input_label = tk.Label(input_section, text="Input", font=("TkHeadingFont"))
input_label.grid(row=0, column=0, columnspan=2, sticky=tk.W)
file_radio = tk.Radiobutton(input_section, variable=self.input_type, value=0)
file_radio.grid(row=1, column=0, sticky=tk.E+tk.W)
file_label = tk.Label(input_section, text="Choose a file")
file_label.grid(row=1, column=1, sticky=tk.W)
file_content = tk.Frame(input_section)
file_chooser = tk.Button(file_content, text="Open file", command=self.choose_file_input)
file_chooser.grid(row=0, column=0)
file_chosen = tk.Label(file_content, textvariable=self.input_filename, anchor=tk.W)
file_chosen.grid(row=0, column=1)
file_content.grid(row=2, column=1, sticky=tk.E+tk.W)
youtube_radio = tk.Radiobutton(input_section, variable=self.input_type, value=1)
youtube_radio.grid(row=3, column=0, sticky=tk.E+tk.W)
youtube_label = tk.Label(input_section, text="Download YouTube URL")
youtube_label.grid(row=3, column=1, sticky=tk.W)
youtube_input = tk.Entry(input_section, state=tk.DISABLED, textvariable=self.youtube_url)
youtube_input.grid(row=4, column=1, sticky=tk.E+tk.W)
file_radio.config(command=lambda: [
file_chooser.config(state=tk.NORMAL),
file_chosen.config(state=tk.NORMAL),
youtube_input.config(state=tk.DISABLED)
])
youtube_radio.config(command=lambda: [
file_chooser.config(state=tk.DISABLED),
file_chosen.config(state=tk.DISABLED),
youtube_input.config(state=tk.NORMAL)
])
file_label.bind("<Button-1>", lambda _: file_radio.invoke())
youtube_label.bind("<Button-1>", lambda _: youtube_radio.invoke())
input_section.columnconfigure(1, weight=1)
input_section.pack(expand=False, fill=tk.X, anchor=tk.W)
speed_section = tk.Frame(self.root, padx=8, pady=8)
speed_label = tk.Label(speed_section, text="Speed", font=("TkHeadingFont"))
speed_label.grid(row=0, column=0, columnspan=2, sticky=tk.W)
sound_label = tk.Label(speed_section, text="Sounded speed", padx=8)
sound_label.grid(row=1, column=0, sticky=tk.E)
sound_input = tk.Entry(speed_section, textvariable=self.sounded_speed)
sound_input.grid(row=1, column=1, sticky=tk.E+tk.W)
silent_label = tk.Label(speed_section, text="Silent speed", padx=8)
silent_label.grid(row=2, column=0, sticky=tk.E)
silent_input = tk.Entry(speed_section, textvariable=self.silent_speed)
silent_input.grid(row=2, column=1, sticky=tk.E+tk.W)
jumpcut_switch = tk.Checkbutton(speed_section, text="Jumpcut silence", variable=self.jumpcut_silence)
jumpcut_switch.grid(row=3, column=0, columnspan=2, sticky=tk.W)
jumpcut_switch.config(command=lambda: [
silent_label.config(state=(tk.DISABLED if self.jumpcut_silence.get() else tk.NORMAL)),
silent_input.config(state=(tk.DISABLED if self.jumpcut_silence.get() else tk.NORMAL))
])
speed_section.columnconfigure(1, weight=1)
speed_section.pack(expand=False, fill=tk.X, anchor=tk.W)
advanced_section = tk.Frame(self.root, padx=8, pady=8)
advanced_label = tk.Label(advanced_section, text="Advanced", font=("TkHeadingFont"))
advanced_label.grid(row=0, column=0, columnspan=2, sticky=tk.W)
thresh_label = tk.Label(advanced_section, text="Silent threshold", padx=8)
thresh_label.grid(row=1, column=0, sticky=tk.E)
thresh_input = tk.Entry(advanced_section, textvariable=self.silent_thresh)
thresh_input.grid(row=1, column=1, sticky=tk.E+tk.W)
margin_label = tk.Label(advanced_section, text="Frame margin", padx=8)
margin_label.grid(row=2, column=0, sticky=tk.E)
margin_input = tk.Entry(advanced_section, textvariable=self.frame_margin)
margin_input.grid(row=2, column=1, sticky=tk.E+tk.W)
sample_label = tk.Label(advanced_section, text="Sample rate", padx=8)
sample_label.grid(row=3, column=0, sticky=tk.E)
sample_input = tk.Entry(advanced_section, textvariable=self.sample_rate)
sample_input.grid(row=3, column=1, sticky=tk.E+tk.W)
framer_label = tk.Label(advanced_section, text="Frame rate", padx=8)
framer_label.grid(row=4, column=0, sticky=tk.E)
framer_input = tk.Entry(advanced_section, textvariable=self.frame_rate)
framer_input.grid(row=4, column=1, sticky=tk.E+tk.W)
frameq_label = tk.Label(advanced_section, text="Frame quality", padx=8)
frameq_label.grid(row=5, column=0, sticky=tk.E)
frameq_input = tk.Entry(advanced_section, textvariable=self.frame_quality)
frameq_input.grid(row=5, column=1, sticky=tk.E+tk.W)
reset_butt = tk.Button(advanced_section, text="Reset", command=self.reset_params)
reset_butt.grid(row=6, column=1, sticky=tk.E)
advanced_section.columnconfigure(1, weight=1)
advanced_section.pack(expand=False, fill=tk.X, anchor=tk.W)
advanced_section.pack_forget()
actions_section = tk.Frame(self.root, padx=8, pady=8)
status_label = tk.Label(actions_section, textvariable=self.rendering)
status_label.pack(expand=False, side=tk.BOTTOM, anchor=tk.E)
run_butt = tk.Button(actions_section, text="Run", command=self.run_jumpcutter)
run_butt.pack(side=tk.RIGHT)
advanced_butt = tk.Button(actions_section, text="Advanced")
advanced_butt.pack(side=tk.RIGHT)
advanced_close = tk.Button(actions_section, text="Hide advanced")
actions_section.pack(expand=False, anchor=tk.E)
advanced_close.config(command=lambda: [
advanced_section.pack_forget(),
advanced_butt.pack(side=tk.RIGHT),
advanced_close.pack_forget()
])
advanced_butt.config(command=lambda: [
advanced_section.pack(expand=False, fill=tk.X, anchor=tk.W),
advanced_butt.pack_forget(),
advanced_close.pack(side=tk.RIGHT),
actions_section.pack_forget(),
actions_section.pack(expand=False, anchor=tk.E)
])
self.root.mainloop()
def reset_params(self, force = False):
if not force:
if not tk_msgbox.askokcancel(
title="Reset parameters",
message="This action will reset advanced parameters to their defaults. Proceed?"
):
return
self.silent_thresh.set("0.03")
self.frame_margin.set("1")
self.sample_rate.set("44100")
self.frame_rate.set("30")
self.frame_quality.set("3")
def choose_file_input(self):
sel_file = tk_filedialog.askopenfilename()
sel_filename = ""
if not sel_file:
sel_file = ""
else:
# find the filename part of the path
if ("/" in sel_file) or ("\\" in sel_file):
last_sep = sel_file.replace("\\", "/")[::-1].index("/")
sel_filename = sel_file[::-1][:last_sep][::-1]
else:
sel_filename = sel_file
self.input_file.set(sel_file)
self.input_filename.set(sel_filename)
def run_jumpcutter(self):
file = tk_filedialog.asksaveasfilename()
if not file:
return
def check_type(value, vtype, vname):
try:
return vtype(value)
            except (TypeError, ValueError):
msg = "%s should be a %s, but was set to %s.\nIt will be ignored." % (vname, vtype.__name__, value)
tk_msgbox.showwarning(title="Invalid parameter", message=msg)
return None
if os.path.exists(file):
os.remove(file)
options = {
"url": None, "input_file": None,
"output_file": file,
"silent_threshold": check_type(self.silent_thresh.get(), float, "Silent threshold"),
"sounded_speed": check_type(self.sounded_speed.get(), float, "Sounded speed"),
"silent_speed": check_type(self.silent_speed.get(), float, "Silent speed"),
"frame_margin": check_type(self.frame_margin.get(), int, "Frame margin"),
"sample_rate": check_type(self.sample_rate.get(), int, "Sample rate"),
"frame_rate": check_type(self.frame_rate.get(), int, "Frame rate"),
"frame_quality": check_type(self.frame_quality.get(), int, "Frame quality"),
}
if self.input_type.get() == 0:
options["input_file"] = self.input_file.get()
else:
options["url"] = self.youtube_url.get()
args = lambda: None
for key in options:
setattr(args, key, options[key])
try:
self.rendering.set("Rendering in progress")
jumpcutter.main(args)
tk_msgbox.showinfo(title="Done", message=("Rendering complete; output saved to %s." % file))
        except Exception:
tk_msgbox.showerror(title="Failed", message="JumpCutter failed unexpectedly.")
self.rendering.set("")
def main():
JcApp()
if __name__ == "__main__":
main()
``` |
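`run_jumpcutter` hands its options to `jumpcutter.main` by attaching them as attributes to a bare `lambda` object. `types.SimpleNamespace` achieves the same thing more explicitly, as this small sketch with made-up option values shows:
```python
from types import SimpleNamespace

options = {"input_file": "in.mp4", "output_file": "out.mp4", "silent_speed": 5.0}

# Equivalent to: args = lambda: None, then setattr(args, key, value) for each option.
args = SimpleNamespace(**options)
print(args.output_file)  # out.mp4
```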
{
"source": "19zhangt/sRNA_analysis_Maize",
"score": 3
} |
#### File: sRNA_analysis_Maize/software_scripts/04-isomiRs-abundance.py
```python
import re
import sys
import itertools
# mature and miRNA* location
location = {}
loc_file = open('data_miRNAs/6mireapout/posMirnaLocation.txt', 'r')
for loc_each in loc_file.readlines():
loc_each = loc_each.strip()
loc_line = loc_each.split('\t')
location[loc_line[0]] = [int(loc_line[1]), int(loc_line[2]), int(loc_line[3]), int(loc_line[4])]
loc_file.close()
# results from isomiRs
def iso_type(ref_location,query_location):
type_out = '-'
if -3 <= query_location[0]-ref_location[0] < 0:
type_out = 'add5'
if query_location[1] > ref_location[1]:
type_out = 'add5_add3'
elif query_location[1] < ref_location[1]:
type_out = 'add5_sub3'
elif 0 < query_location[0]-ref_location[0] <= 3:
type_out = 'sub5'
if query_location[1] > ref_location[1]:
type_out = 'sub5_add3'
elif query_location[1] < ref_location[1]:
type_out = 'sub5_sub3'
elif 0 < query_location[1]-ref_location[1] <= 3:
type_out = 'add3'
if query_location[0] < ref_location[0]:
type_out = 'add3_add5'
elif query_location[0] > ref_location[0]:
type_out = 'add3_sub5'
elif -3 <= query_location[1]-ref_location[1] < 0:
type_out = 'sub3'
if query_location[0] < ref_location[0]:
type_out = 'sub3_add5'
elif query_location[0] > ref_location[0]:
type_out = 'sub3_sub5'
else:
type_out = '-'
return type_out
def iso_snp_type(ref_location,query_location,tmp_mutant):
type_out = '-'
seed_dist = int(tmp_mutant) + query_location[0] - ref_location[0]
if query_location[1] - ref_location[1] == 1 and int(tmp_mutant) == query_location[1]-query_location[0] +1:
type_out = 'nt_add3'
if query_location[0] == ref_location[0] and query_location[1] == ref_location[1]:
type_out = 'seed_snp'
elif 9 <= seed_dist <= (ref_location[1]-ref_location[0]+1):
type_out = 'tail_snp'
return type_out
def iso_mat_star(mode, mat_location, star_location, query_location, mutant):
type_overlap = ['-', '-']
start_dis_1 = abs(query_location[0] - mat_location[0])
end_dis_1 = abs(query_location[1] - mat_location[1])
con_1 = (start_dis_1 <= 3 and end_dis_1 <= 3)
start_dis_2 = abs(query_location[0] - star_location[0])
end_dis_2 = abs(query_location[1] - star_location[1])
con_2 = (start_dis_2 <= 3 and end_dis_2 <= 3)
if mode == 'NonSNP':
if start_dis_1 == 0 and end_dis_1 == 0:
type_overlap = ['mature', '-']
elif start_dis_1 <= 3 and end_dis_1 <= 3:
type_overlap = ['iso',iso_type(mat_location, query_location)]
elif start_dis_2 == 0 and end_dis_2 == 0:
type_overlap = ['star', '-']
elif start_dis_2 <= 3 and end_dis_2 <= 3:
type_overlap = ['iso',iso_type(star_location, query_location)]
else:
snp_type_m = iso_snp_type(mat_location, query_location, mutant)
snp_type_s = iso_snp_type(star_location, query_location, mutant)
if start_dis_1 == 0 and end_dis_1 == 0 and snp_type_m == '-':
type_overlap = ['mature', '-']
elif start_dis_1 <= 3 and end_dis_1 <= 3 and snp_type_m != '-':
type_overlap = ['iso', snp_type_m]
elif start_dis_2 == 0 and end_dis_2 == 0 and snp_type_s == '-':
type_overlap = ['star', '-']
elif start_dis_2 <= 3 and end_dis_2 <= 3 and snp_type_s != '-':
type_overlap = ['iso',snp_type_s]
return type_overlap
mapping_result = {}
files_name = ['data_miRNAs/7isomiRs/Results/r0/RawResults/r0.txt', 'data_miRNAs/7isomiRs/Results/r1/RawResults/r1.txt']
for file_name in files_name:
riso_file = open(file_name, "r")
for iso_line in riso_file.readlines()[1::]:
line_inform = iso_line.split("\t")
line_new = line_inform[3]
line_new = re.sub("\[\'", "", line_new)
line_new = re.sub("\'\]", "", line_new)
line_inform[3] = line_new.split("\', \'")
pre_length = len(line_inform[3])
iso_seq = line_inform[0]
for i in range(pre_length):
pre_each = line_inform[3][i].split(":")
pre_name = pre_each[0]
if pre_name in location:
start_site = int(pre_each[1])
end_site = start_site + int(len(line_inform[0])-1)
if file_name == files_name[0]:
type_out_name = iso_mat_star('NonSNP', location[pre_name][0:2], location[pre_name][2:4], [start_site,end_site], '-')
if type_out_name[0] != '-':
if pre_name not in mapping_result:
mapping_result[pre_name] = [":".join([pre_name,type_out_name[0],iso_seq,type_out_name[1],'-',str(start_site),str(end_site),'+'])]
# all_alignment[pre_name] = [iso_seq + "\t" + str(int(pre_each[1]) -1)]
else:
mapping_result[pre_name].append(":".join([pre_name,type_out_name[0],iso_seq,type_out_name[1],'-',str(start_site),str(end_site),'+']))
# all_alignment[pre_name].append(iso_seq + "\t" + str(int(pre_each[1]) -1))
else:
mutant_type = pre_each[2].split(",")[0]
mutant = pre_each[2].split(",")[1]
type_out_name = iso_mat_star('SNP', location[pre_name][0:2], location[pre_name][2:4], [start_site,end_site], mutant)
if type_out_name[0] != '-':
if pre_name not in mapping_result:
mapping_result[pre_name] = [":".join([pre_name,type_out_name[0],iso_seq,type_out_name[1],mutant_type,str(start_site),str(end_site),'+'])]
else:
mapping_result[pre_name].append(":".join([pre_name,type_out_name[0],iso_seq,type_out_name[1],mutant_type,str(start_site),str(end_site),'+']))
riso_file.close()
# abundance_seq = list(itertools.chain.from_iterable(mapping_result.values()))
# abundance_seq = list(set(abundance_seq))
# iso_file = open('0-unique-isomiRNAs.txt', "w")
# for i in abundance_seq:
# iso_file.write(i + "\n")
# iso_file.close()
# isomiR_file = open('pre-miRNAs2isomiRs.txt', "w")
# for i in mapping_result:
# for each in list(set(mapping_result[i])):
# isomiR_file.write('%s\t%s\n' % (i, each))
# isomiR_file.close()
mapping_file = open('data_miRNAs/7isomiRs/0-alignment.txt', "r")
for aline in mapping_file:
line_detail = aline.strip().split('\t')
pre_name = line_detail[1]
match_list = [pre_name,'others',line_detail[6],'-','-',line_detail[2],line_detail[3],line_detail[4]]
if pre_name not in mapping_result:
mapping_result[pre_name] = [":".join(match_list)]
elif line_detail[6] not in [x.split(':')[2] for x in mapping_result[pre_name]]:
mapping_result[pre_name].append(":".join(match_list))
else:
pass
mapping_file.close()
# using plot the show
alignment_file = open('data_miRNAs/7isomiRs/0-pre-miRNAs_mapping_read.txt', "w")
for i in mapping_result.values():
for each_loc in i:
alignment_file.write('%s\n' % each_loc)
alignment_file.close()
```
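A short worked example of the coordinate logic in `iso_type` above, using hypothetical positions: with a mature miRNA at 10-30 on the precursor, an isomiR spanning 8-32 starts 2 nt upstream and ends 2 nt downstream, so the script classifies it as `add5_add3`.
```python
# Hypothetical 1-based, inclusive coordinates on a precursor.
mature = (10, 30)  # reference mature miRNA start/end
isomir = (8, 32)   # observed isomiR start/end

start_shift = isomir[0] - mature[0]  # -2 -> within [-3, 0): 5' extension ('add5')
end_shift = isomir[1] - mature[1]    #  2 -> 3' end also extended
print(start_shift, end_shift)        # -2 2 -> iso_type() returns 'add5_add3'
```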
#### File: sRNA_analysis_Maize/software_scripts/3_isomiRs_ID.py
```python
__version__ = '0.53'
__LastModf__ = "2013, May 30"
import os
from os import makedirs
import datetime
import sys
def _cleanALLDIRS():
top = sys.argv[1] + "/Results"
try:
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
except:
next
def _cleanDIRS(x):
listx = []
if x == "r0":
listx.append(sys.argv[1] + "/Results/input")
listx.append(sys.argv[1] + "/Results/%s/SAM" % (x))
listx.append(sys.argv[1] + "/Results/%s/mapped" % (x))
listx.append(sys.argv[1] + "/Results/%s/unmapped" % (x))
try:
for top in listx:
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
except:
next
def _ParseFq(inputFile, outName):
"""
:param inputFile:
"""
output = open(outName, "w")
count_seq = 0
count = 1
Seq = ""
dic = dict()
for line in open(inputFile):
line = line.split("\n")[0]
if count == 2:
Seq = line.split()[0]
if "N" in Seq:
next;
else:
if Seq not in dic:
count_seq += 1
dic[Seq] = ">Seq" + str(count_seq) + ":1"
elif Seq in dic:
dic[Seq] = ((dic[Seq]).split(":")[0]) + ":" + str(int((dic[Seq]).split(":")[-1]) + 1)
if count == 4:
count = 0
count += 1
for Seq in dic:
tmp = dic[Seq] + "\n" + Seq + "\n"
output.write(tmp)
def _ParseFa(inputFile, outName):
"""
:param inputFile:
"""
output = open(outName, "w")
count_seq = 0
dic = dict()
for line in open(inputFile):
line = line.split("\n")[0]
if line.startswith(">"):
next,
else:
Seq = line.split()[0]
if "N" in Seq:
next;
else:
if Seq not in dic:
count_seq += 1
dic[Seq] = ">Seq" + str(count_seq) + ":1"
elif Seq in dic:
dic[Seq] = ((dic[Seq]).split(":")[0]) + ":" + str(int((dic[Seq]).split(":")[-1]) + 1)
for Seq in dic:
tmp = dic[Seq]+"\n"+Seq+"\n"
output.write(tmp)
def _buildout(ref1, ref2, ref3, cab, output):
def printlines(total, position, length, seq):
first = position - 1
end = total-first-length
line = ("."*(position-1))+seq+("."*end)
return line
out = open(output, "w")
for r in ref2:
refName = r
refSeq = ref1[r]["refSeq"]
refLen = ref1[r]["refLen"]
line1 = refName + ("-"*(refLen-len(refName))) +"\tRound\tLength\tVar\tPosition" + cab + "\n"
line2 = ""
line3 = refSeq + "\t" + refName + "\t" + str(refLen) + "\n"
line4 = ""
try:
for m in ref3[r]:
matName = m
matLen = ref3[r][m]["matLen"]
matSeq = ref3[r][m]["matSeq"]
matPos = ref3[r][m]["matPos"]
line2 += printlines(refLen, matPos, matLen, matSeq) + "\t" + matName + "\t" + str(matLen) +"\n"
except:
next;
try:
tmp1 = dict()
tmp2 = dict()
for t in ref2[r]:
if ref2[r][t]["Position"] > 0:
tmp1[t] = ref2[r][t]["Position"]
elif ref2[r][t]["Position"] < 0:
tmp2[t] = ref2[r][t]["Position"]
list1 = sorted(tmp1.items(),lambda x,y: cmp(y[1],x[1]))
list2 = sorted(tmp2.items(),lambda x,y: cmp(y[1],x[1]))
for s1 in list1:
s = s1[0]
Modification = (ref2[r][s]["Modification"]).split(",")[0]
if Modification != "no":
if ">" in Modification:
Position = ref2[r][s]["Position"]
else:
Position = ref2[r][s]["Position"]-len(Modification)
else:
Position = ref2[r][s]["Position"]
Freqs = ref2[r][s]["Freqs"]
ModSeq = ref2[r][s]["ModSeq"]
Mod = ref2[r][s]["Mod"]
Rn = ref2[r][s]["Round"]
Len = len(ModSeq)
line1 += printlines(refLen, Position, Len, ModSeq) +"\t"+ Rn +"-"+ Mod + "\t" + str(Len) + "\t" + Modification + "\t" + str(Position) +"\t"+ Freqs + "\n"
for s1 in list2:
s = s1[0]
Modification = (ref2[r][s]["Modification"]).split(",")[0]
if Modification != "no":
if ">" in Modification:
Position = ref2[r][s]["Position"]*-1
else:
Position = (ref2[r][s]["Position"]-len(Modification))*-1
else:
Position = ref2[r][s]["Position"]*-1
Freqs = ref2[r][s]["Freqs"]
ModSeq = _RevComp(ref2[r][s]["ModSeq"])
Mod = ref2[r][s]["Mod"]
Rn = ref2[r][s]["Round"]
Len = len(ModSeq)
line4 += printlines(refLen, Position, Len, ModSeq) +"\t"+ Rn + "-" + Mod + "\t" + str(Len) + "\t" + Modification + "\t" + str(Position*-1) + "\t"+ Freqs + "\n"
except:
next;
lines = line1 + line2 + line3 + line4 + "\n\n"
out.write(lines)
def __mature(input1, sp, mir):
cntrl = 0
dic = dict()
name = ""
seq = ""
rootname = len(sp)+len(mir)+1
for line in open(input1):
if line.startswith(">"):
name = (line.split()[0])[1:]
if name.split("-")[-1] == "3p" or name.split("-")[-1] == "5p":
Rname = sp + "-" + mir + name[rootname:-3]
else:
Rname = sp + "-" + mir + name[rootname:]
seq = ""
else:
seq += line.split()[0]
if Rname not in dic:
dic[Rname] = dict()
dic[Rname][name] = dict()
dic[Rname][name]["matName"] = name
dic[Rname][name]["matSeq"] = seq
dic[Rname][name]["matLen"] = len(seq)
if Rname in dic:
if name not in dic[Rname]:
dic[Rname][name] = dict()
dic[Rname][name]["matName"] = name
dic[Rname][name]["matSeq"] = seq
dic[Rname][name]["matLen"] = len(seq)
return dic
def _mature():
input1 = sys.argv[1] + "/Results/r0/SAM/mature-clean.sam"
dic = dict()
seq = ""
name = ""
for line1 in open(input1):
if line1.startswith("@"):
next
else:
tmpline = line1.split("\t")
MIRname = tmpline[0]
Sense = tmpline[1]
Rname = tmpline[2]
Position = int(tmpline[3])
seq = tmpline[9]
if Rname not in dic:
dic[Rname] = dict()
dic[Rname][MIRname] = dict()
dic[Rname][MIRname]["matName"] = MIRname
dic[Rname][MIRname]["matSeq"] = seq
dic[Rname][MIRname]["matLen"] = len(seq)
dic[Rname][MIRname]["matPos"] = Position
if Rname in dic:
if name not in dic[Rname]:
dic[Rname][MIRname] = dict()
dic[Rname][MIRname]["matName"] = MIRname
dic[Rname][MIRname]["matSeq"] = seq
dic[Rname][MIRname]["matLen"] = len(seq)
dic[Rname][MIRname]["matPos"] = Position
return dic
def _GetRef(input1):
dic = dict()
Rname = ""
for line in open(input1):
if line.startswith(">"):
Rname = (line.split()[0])[1:]
else:
seq = line.split()[0]
dic[Rname] = dict()
dic[Rname]["refSeq"] = seq
dic[Rname]["refLen"] = len(seq)
return dic
def _ParseRn(inputFile, dic):
count = 0
cablibs = ""
def _ModifySeq(Seq, mod):
newSeq = ""
if mod != "no":
position = int(mod.split(",")[1])-1
for i in range(0, len(Seq)):
if i == position:
newSeq += Seq[i].lower()
else:
newSeq += Seq[i]
return newSeq
for line in open(inputFile):
refs = dict()
freqs = ""
col = len(line.split("\t"))
if line.startswith("OriginalSeq"):
lines = line.split("\t")
start = 5
while start < col:
cablibs += "\t" + (lines[start]).split()[0]
start += 1
else:
lines = line.split("\t")
seq = lines[0]
rn = lines[1]
seqLen = lines[2]
start = 4
while start < col:
freqs += "\t" + (lines[start]).split()[0]
start += 1
ref = lines[3][1:-1].split(", ")
for i in ref:
refs[(i[1:-1]).split(":")[0]] = dict()
refs[(i[1:-1]).split(":")[0]]["Position"] = int((i[1:-1]).split(":")[1])
refs[(i[1:-1]).split(":")[0]]["Modification"] = (i[1:-1]).split(":")[2]
refs[(i[1:-1]).split(":")[0]]["Mod"] = (i[1:-1]).split(":")[3]
for r in refs:
count += 1
SeqID = "Seq"+"-"+str(rn)+str(count)
if r not in dic:
dic[r] = dict()
dic[r][SeqID] = dict()
dic[r][SeqID]["Modification"] = refs[r]["Modification"]
dic[r][SeqID]["Mod"] = refs[r]["Mod"]
if dic[r][SeqID]["Modification"] != "no":
dic[r][SeqID]["ModSeq"] = _ModifySeq(seq, dic[r][SeqID]["Modification"])
else:
dic[r][SeqID]["ModSeq"] = seq
dic[r][SeqID]["Position"] = refs[r]["Position"]
dic[r][SeqID]["Freqs"] = freqs
dic[r][SeqID]["Round"] = rn
if r in dic:
dic[r][SeqID] = dict()
dic[r][SeqID]["Modification"] = refs[r]["Modification"]
dic[r][SeqID]["Mod"] = refs[r]["Mod"]
if dic[r][SeqID]["Modification"] != "no":
dic[r][SeqID]["ModSeq"] = _ModifySeq(seq, dic[r][SeqID]["Modification"])
else:
dic[r][SeqID]["ModSeq"] = seq
dic[r][SeqID]["Position"] = refs[r]["Position"]
dic[r][SeqID]["Freqs"] = freqs
dic[r][SeqID]["Round"] = rn
return dic, cablibs
class Reference:
def __init__(self, name, refpath, buildindex, bowtiebuild):
self.Name = name
self.Refpath = refpath
self.Buildindex = buildindex
self.BowtieBuild = bowtiebuild
def _BuidIndex(self):
if self.Buildindex == "yes":
print "Building indexes for %s ..." %(self.Name)
print self.Refpath
os.system("%s %s %s" %(self.BowtieBuild, self.Refpath, self.Refpath))
print "... The indexes was built\n"
else:
print "... It was not necessary to build %s indexes\n" %(self.Name)
class Library():
def __init__(self, libname, libpath, format):
self.LibName = libname
self.LibPath = libpath
self.Format = format
def _getLibInfo(self):
print self.LibName, self.LibPath, self.Format
class SAMlist():
def __init__(self, rounds, libs, add):
self.rounds = rounds + 1
self.libs = libs
if add == 0:
self.add = "r0"
if add == 1:
self.add = "r1"
elif add > 1:
self.add = "M" + str(add)
self.samlist = dict()
def _getSAMlist(self):
for i in range(self.rounds):
if i == 0:
if self.add == "r0":
name = "r0"
self.samlist[name] = dict()
for l in self.libs:
self.samlist[name][l] = sys.argv[1] + "/Results/%s/SAM/r%i-%s-clean.sam" %(self.add, i, l)
else:
next
if i == 1:
if self.add == "r1":
name = "r1"
self.samlist[name] = dict()
for l in self.libs:
self.samlist[name][l] = sys.argv[1] + "/Results/%s/SAM/r%i-%s-clean.sam" %(self.add, i, l)
print self.samlist[name][l]
else:
next
elif i > 1:
name = "r"+str(i)
self.samlist[name] = dict()
for l in self.libs:
self.samlist[name][l] = sys.argv[1] + "/Results/%s/SAM/r%i-%s-clean.sam" %(self.add, i, l)
return self.samlist
def _3sRNAaddfa(readFile, output, z):
z = int(z)
output1 = open(output, "w")
yeseq = r'A|a|T|t|C|c|G|g'
pegoFa = 0
for line in open(readFile):
line = line.split("\n")[0]
if line.startswith(">"):
if pegoFa == 0:
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
elif pegoFa == 1:
complete = name + "&SIZE_" + size + "&M3_" + add + addx + "\n" + seq + "\n"
output1.write(complete)
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
pegoFa = 0
else:
seq1 = line.split()[0]
if z == 2:
add = seq1[-2:]
seq = seq1[:-2]
elif z == 1:
add = seq1[-1]
seq = seq1[:-1]
pegoFa = 1
complete = name + "&SIZE_" + size + "&M3_" + add + addx + "\n" + seq + "\n"
output1.write(complete)
def _5sRNAaddfa(readFile, output, z):
output1 = open(output, "w")
yeseq = r'A|a|T|t|C|c|G|g'
pegoFa = 0
for line in open(readFile):
line = line.split("\n")[0]
if line.startswith(">"):
if pegoFa == 0:
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
elif pegoFa == 1:
complete = name + "&SIZE_" + size + "&M5_" + addx + add + "\n" + seq + "\n"
output1.write(complete)
seqName = line+"&"
if (seqName.split("&")[1]).split("_")[0] == "SIZE":
name = seqName.split("&")[0]
size = (seqName.split("&")[1]).split("_")[1]
addx = (seqName.split("&")[2]).split("_")[1]
else:
name = seqName.split("&")[0]
size = "x"
addx = ""
pegoFa = 0
else:
seq1 = line.split()[0]
add = seq1[:z]
seq = seq1[z:]
pegoFa = 1
complete = name + "&SIZE_" + size + "&M5_" + addx + add + "\n" + seq + "\n"
output1.write(complete)
def _appCutoff(input, name, cutoff, dir):
inputA = open(input)
cut = int(cutoff)
out = dir+"Cutoff/"+name+"-Cutoff"+cutoff+".txt"
output = open(out, "w")
for line in inputA:
if line.startswith("OriginalSeq"):
output.write(line)
else:
sum = 0
line1 = line.split("\t")[4:]
n = len(line1)
for i in line1:
sum += int(i)
if sum >= (cut):
output.write(line)
return out
def _RevComp(sequence):
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A','a':'t', 'c':'g', 'g':'c', 't':'a'}
return "".join([complement.get(nt, 'N') for nt in sequence[::-1]])
def _Rev(sequence):
return sequence[::-1]
def _Comp(sequence):
complement = {'A':'T', 'C':'G', 'G':'C', 'T':'A','a':'t', 'c':'g', 'g':'c', 't':'a'}
return "".join([complement.get(nt, 'N') for nt in sequence])
def _freqSAM(input1, n, R, output, nadd):
yesSeqNT = r'A|a|T|t|C|c|G|g'
out = open(output, 'w')
s = 5
nlib = 4
dic = dict()
n = int(n)
x = n+s
cab = ""
Seq = ""
mod = ""
startall = datetime.datetime.now()
for line in input1:
start = datetime.datetime.now()
libName = line
print "%s..."%(libName)
lib2 = input1[line]
nlib += 1
cab += "\t" + libName
samFile = open(lib2)
for line1 in samFile.readlines():
mod = "no"
if line1.startswith("@"):
next
else:
tmpName = line1.split("\t")[0]
tmpName = tmpName+"&"
if (tmpName.split("&")[1]).split("_")[0] == "SIZE":
rawname = (tmpName.split("&")[0]).split(":")[0]
Freq = int((tmpName.split("&")[0]).split(":")[1])
Size = (tmpName.split("&")[1]).split("_")[1]
Add = (tmpName.split("&")[2]).split("_")[1]
mod = Add
Add1 = (tmpName.split("&")[2]).split("_")[0]
else:
rawname = (tmpName.split("&")[0]).split(":")[0]
Freq = int((tmpName.split("&")[0]).split(":")[1])
Size = ""
Add = ""
Add1 = "no"
Test_Sense = line1.split("\t")[1]
if Test_Sense == "0":
Sense = ""
Seqmap = line1.split("\t")[9]
else:
Seqmap = _RevComp(line1.split("\t")[9])
Sense = "-"
if nadd == 3:
Seq = Seqmap+Add
mod += ","+str(len(Seq)) + ":M3"
if nadd == 5:
Seq = Add+Seqmap
mod += ",1" + ":M5"
if nadd == 0:
Seq = Seqmap
mod += ":r0"
if nadd == 1:
Seq = Seqmap
mod += ":r1"
mm = (line1.split("\t")[12]).split(":")[-1]
SNP = ""
if mm == str(len(Seqmap)): # MD:Z:22
next,
if mm != str(len(Seq)): # MD:Z:0C21 | MD:Z:21A0 | MD:Z:16A5 | MD:Z:4C17
try:
if mm[1]=="A" or mm[1]=="T" or mm[1]=="C" or mm[1]=="G":
SNP = Seq[int(mm[0])]
mod = mm[1] + ">" + SNP + "," + str(int(mm[0])+1)
if mm[0] == "0":
Add1 = "M5"
mod += ":" + Add1
else:
Add1 = "MM"
mod += ":" + Add1
elif mm[2]=="A" or mm[2]=="T" or mm[2]=="C" or mm[2]=="G":
SNP = Seq[int(mm[0:2])]
mod = mm[2] + ">" + SNP + "," + str(int(mm[0:2])+1)
if mm[0:2] == str(len(Seqmap)-1):
Add1 = "M3"
mod += ":" + Add1
else:
Add1 = "MM"
mod += ":" + Add1
except:
next;
Size = str(len(Seq))
RefSeq = line1.split("\t")[2]+":"+Sense+line1.split("\t")[3]+":"+mod
if SNP != "N":
if Seq in dic:
dic[Seq][nlib] = Freq
if rawname not in dic[Seq][0]:
dic[Seq][0].append(rawname)
if RefSeq not in dic[Seq][4]:
dic[Seq][4].append(RefSeq)
elif rawname in dic[Seq][0]:
if RefSeq not in dic[Seq][4]:
dic[Seq][4].append(RefSeq)
elif Seq not in dic:
dic[Seq] = []
for i in range(s):
if i == 0 or i == 4:
dic[Seq].append([])
else:
dic[Seq].append("")
for i in range(n):
dic[Seq].append(0)
dic[Seq][0].append(rawname)
dic[Seq][1] = Seq
dic[Seq][2] = R
dic[Seq][3] = Size
dic[Seq][4].append(RefSeq)
dic[Seq][nlib] = Freq
end = datetime.datetime.now() - start
print "\t%s is done\t(time: %s)"%(libName,end)
cab = "OriginalSeq"+"\t"+"Round"+"\t"+"Variation"+"\t"+"Length"+"\t"+"preMIRref"+ cab + "\n"
out.write(cab)
print "\tcreating Frequences Tab"
for Seq in dic:
tab = ""
for i in range(x):
if i != 0:
if i < (x-1):
if i == 4:
dic[Seq][4].sort()
",".join(dic[Seq][4])
tmp1 = dic[Seq][i]
tab += str(tmp1)+"\t"
else:
tmp1 = dic[Seq][i]
tab += str(tmp1)
tmp2 = tab + "\n"
out.write(tmp2)
total = datetime.datetime.now() - startall
print "\ttotal time read libs: %s"%(total)
def _cleanSAM(inputA, output):
out = open(output, "wb")
cab = ""
dic = dict()
for line in open(inputA):
if line.startswith("@"):
out.write(line)
if line.startswith("@SQ"):
locus = (line.split("@SQ\tSN:")[1]).split("\t")[0]
dic[locus] = [1]
else:
seq = line.split("\t")[2]
if seq in dic:
out.write(line)
else:
next
def _Round_mir(bowtie, refpath, mir_ref, root0):
print "\n*** Mapping mature miRNA on precursor"
inp = "f"
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/mature.sam" % (bowtie, refpath, inp, mir_ref, root0))
inputA = "%sSAM/mature.sam" %(root0)
inputB = "%sSAM/mature-clean.sam" %(root0)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/mature.sam" %(root0))
def _Round0(bowtie, refpath, libname, libpath, format, filter, filterpath, root0):
"""
:param bowtie:
:param refpath:
:param libname:
:param libpath:
:param format:
:param filter:
:param filterpath:
:param root0:
"""
print "\n*** Mapping Round 0...%s" %(libname)
if format == "fa":
inp = "f"
else:
inp = "q"
if filter == "yes":
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/r0-%s.sam --un %sunmapped/unr0tmp-%s.%s --al %smapped/r0-%s.%s" %(bowtie, refpath, inp, libpath, root0, libname, root0, libname, format, root0, libname, format))
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/unr0tmp-%s.%s --sam %sSAM/r0tmp-%s.sam --un %sunmapped/unr0-%s.%s --al %smapped/r0-%s.%s" %(bowtie, filterpath, inp, root0, libname, format, root0, libname, root0, libname, format, root0, libname, format))
os.remove("%sunmapped/unr0tmp-%s.%s" %(root0, libname, format))
os.remove("%sSAM/r0tmp-%s.sam" %(root0, libname))
elif filter == "no":
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %s --sam %sSAM/r0-%s.sam --un %sunmapped/unr0-%s.%s --al %smapped/r0-%s.%s" %(bowtie, refpath, inp, libpath, root0, libname, root0, libname, format, root0, libname, format))
inputA = "%sSAM/r0-%s.sam" %(root0, libname)
inputB = "%sSAM/r0-%s-clean.sam" %(root0, libname)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/r0-%s.sam" %(root0, libname))
def _Round1(bowtie, refpath, libname, libpath, format, n, root1, root0):
add = "M"
inp = "f"
print "*** Mapping %sadd Round %s...%s" %(add, n, libname)
os.system("%s --norc -n 1 -v 1 -a -l 6 -t %s -%s %sunmapped/unr0-%s.%s --sam %sSAM/r1-%s.sam --un %sunmapped/unr1-%s.%s --al %smapped/r1-%s.%s" %(bowtie, refpath, inp, root0, libname, format, root1, libname, root1, libname, format, root1, libname, format))
inputA = "%sSAM/r1-%s.sam" %(root1, libname)
inputB = "%sSAM/r1-%s-clean.sam" %(root1, libname)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/r1-%s.sam" %(root1, libname))
def _RoundN(add, bowtie, refpath, libname, libpath, format, n, rootN, root1):
y=n-1
if format == "fa":
inp = "f"
else:
inp = "q"
if n == 2:
print "*** Mapping M%s Round %s...%s" %(add, n, libname)
inputA = "%sunmapped/unr1-%s.%s" %(root1, libname, format)
inputB = "%sunmapped/un%sr1-%s.%s" %(root1, add, libname, format)
print "\t*** Creating un%sr1-%s.%s" %(add, libname, format)
if add == "3":
_3sRNAaddfa(inputA, inputB, 2)
if add == "5":
_5sRNAaddfa(inputA, inputB, 2)
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/un%sr1-%s.%s --sam %sSAM/r2-%s.sam --un %sunmapped/unr2-%s.%s --al %smapped/r2-%s.%s" %(bowtie, refpath, inp, root1, add, libname, format, rootN, libname, rootN, libname, format, rootN, libname, format))
inputA = "%sSAM/r%s-%s.sam" %(rootN, n, libname)
inputB = "%sSAM/r%s-%s-clean.sam" %(rootN, n, libname)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/r%s-%s.sam" %(rootN, n, libname))
elif n > 2:
print "\n*** Mapping M%s Round %s...%s" %(add, n, libname)
inputA = "%sunmapped/unr%s-%s.%s" %(rootN, y, libname, format)
inputB = "%sunmapped/un%sr%s-%s.%s" %(rootN, add, y, libname, format)
print "\t*** Creating un%sr%s-%s.%s" %(add, y, libname, format)
if add == "3":
if format == "fq":
_3sRNAaddfq(inputA, inputB, 1)
elif format == "fa":
_3sRNAaddfa(inputA, inputB, 1)
if add == "5":
if format == "fq":
_5sRNAaddfq(inputA, inputB, 1)
elif format == "fa":
_5sRNAaddfa(inputA, inputB, 1)
os.system("%s --norc -n 0 -v 0 -a -l 6 -t %s -%s %sunmapped/un%sr%s-%s.%s --sam %sSAM/r%s-%s.sam --un %sunmapped/unr%s-%s.%s --al %smapped/r%s-%s.%s" %(bowtie, refpath, inp, rootN, add, y, libname, format, rootN, n, libname, rootN, n, libname, format, rootN, n, libname, format))
inputA = "%sSAM/r%s-%s.sam" %(rootN, n, libname)
inputB = "%sSAM/r%s-%s-clean.sam" %(rootN, n, libname)
_cleanSAM(inputA, inputB)
os.remove("%sSAM/r%s-%s.sam" %(rootN, n, libname))
def _ReadConfig():
config = open(sys.argv[1] + "/Config.txt")
bowtie = []
libs = dict()
mainref = []
filterref = []
mirnaref = []
add3 = []
add5 = []
Size_cutoff = []
cutoffs = []
for line in config.readlines():
line = line.strip()
if line.startswith("#"):
next;
if line.startswith("bowtie_path:"): #bowtie
bowtie_path = line.split()[1]
bowtie.append(bowtie_path)
if line.startswith("bowtie-build_path:"):
bowtiebuild_path = line.split()[1]
bowtie.append(bowtiebuild_path)
if line.startswith("lib:"): #libs
libName = line.split()[2]
libPath = line.split()[1]
format = line.split()[3]
libs[libName]=[]
libs[libName].append("")
libs[libName].append("")
libs[libName][0]= libPath
libs[libName][1]= format
if line.startswith("main_ref:"): #main ref
mainref_path = line.split()[1]
mainref_name = mainref_path.split("/")[-1]
mainref_index = line.split()[2]
mainref.append(mainref_name)
mainref.append(mainref_path)
mainref.append(mainref_index)
if line.startswith("filter_ref:"): #filter ref
filterref_path = line.split()[1]
if filterref_path == "no":
filterref = filterref_path
else:
filterref_name = os.path.split(filterref_path)[-1]
#filterref_name = filterref_path.split("/")[-1]
filterref_index = line.split()[2]
filterref.append(filterref_name)
filterref.append(filterref_path)
filterref.append(filterref_index)
if line.startswith("known_miRNAs:"): #known miRNA
miRNAref_path = line.split()[1]
if miRNAref_path == "no":
mirnaref.append(miRNAref_path)
else:
miRNAref_name = os.path.split(miRNAref_path)[-1]
#miRNAref_name = miRNAref_path.split("/")[-1]
mirnaref.append(miRNAref_name)
mirnaref.append(miRNAref_path)
if line.startswith("M3:"): #3Add and Rounds
add3.append((line.split()[1]))
add3.append((line.split()[2]))
if line.startswith("M5:"): #5Add and Rounds
add5.append((line.split()[1]))
add5.append((line.split()[2]))
if line.startswith("RangeSize:"): #Size cutoff
min = int(line.split()[1])
max = int(line.split()[2])
Size_cutoff.append(min)
Size_cutoff.append(max)
if line.startswith("cutoff:"): #Frequence cutoff
cutoff = line.split()[1]
cutoffs.append(cutoff)
else:
next;
return bowtie, libs, mainref, filterref, mirnaref, add3, add5, Size_cutoff, cutoffs
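# A minimal Config.txt sketch, inferred from the parser above; every path and
# name below is a hypothetical placeholder, not part of the original project:
#
#   bowtie_path: /usr/bin/bowtie
#   bowtie-build_path: /usr/bin/bowtie-build
#   lib: /data/libA.fq libA fq
#   main_ref: /data/genome.fa genome_index
#   filter_ref: no
#   known_miRNAs: no
#   M3: yes 3
#   M5: no 0
#   RangeSize: 18 26
#   cutoff: 5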
def _Complete_analysis():
bowtie, libs, mainref, filterref, mirnaref, add3, add5, Size_cutoff, cutoffs = _ReadConfig()
### Creating Folders
#For r0
try:
makedirs(sys.argv[1] + "/Results/input")
makedirs(sys.argv[1] + "/Results/r0/SAM")
makedirs(sys.argv[1] + "/Results/r0/mapped")
makedirs(sys.argv[1] + "/Results/r0/unmapped")
makedirs(sys.argv[1] + "/Results/r0/RawResults")
makedirs(sys.argv[1] + "/Results/r0/Cutoff")
makedirs(sys.argv[1] + "/Results/MapResults")
makedirs(sys.argv[1] + "/Results/r1/SAM")
makedirs(sys.argv[1] + "/Results/r1/mapped")
makedirs(sys.argv[1] + "/Results/r1/unmapped")
makedirs(sys.argv[1] + "/Results/r1/RawResults")
makedirs(sys.argv[1] + "/Results/r1/Cutoff")
# For add3
add3_ = "no"
if add3[0] == "yes":
round3 = int(add3[1])+1
add3_ = add3[0]
makedirs(sys.argv[1] + "/Results/M3/SAM")
makedirs(sys.argv[1] + "/Results/M3/mapped")
makedirs(sys.argv[1] + "/Results/M3/unmapped")
makedirs(sys.argv[1] + "/Results/M3/RawResults")
makedirs(sys.argv[1] + "/Results/M3/Cutoff")
# For M5
add5_ = "no"
if add5[0] == "yes":
round5 = int(add5[1])+1
add5_ = add5[0]
makedirs(sys.argv[1] + "/Results/M5/SAM")
makedirs(sys.argv[1] + "/Results/M5/mapped")
makedirs(sys.argv[1] + "/Results/M5/unmapped")
makedirs(sys.argv[1] + "/Results/M5/RawResults")
makedirs(sys.argv[1] + "/Results/M5/Cutoff")
except:
print "\nWARNING!\n\tThe Results directory already exists\n\n\tPlease, rename this old directory or include -f or --force parameter on terminal in order to erase this directory.\n\n\tEx: python sRNAadd.py --force\n\n"
exit(1)
# set bowtie
bowtie_path = bowtie[0]
bowtiebuild_path = bowtie[1]
# set main ref
mainref_name = mainref[0]
mainref_path = mainref[1]
mainref_index = mainref[2]
main_ref = Reference(mainref_name, mainref_path, mainref_index, bowtiebuild_path)
main_ref._BuidIndex()
# set filter ref
if filterref != "no":
filter = "yes"
filterref_name = filterref[0]
filterref_path = filterref[1]
filterref_index = filterref[2]
filter_ref = Reference(filterref_name, filterref_path, filterref_index, bowtiebuild_path)
filter_ref._BuidIndex()
else:
filter = "no"
filterref_path = ""
next;
# set Size Cutoff
min = Size_cutoff[0]
max = Size_cutoff[1]
### Set other variables:
mature_dic = ""
### Perform Parse and mapping
for l in libs:
libname = l
libpath_tmp = libs[l][0]
format = libs[l][1]
#Parse input
libpath = sys.argv[1] + "/Results/input/"+ libname +"-nonRed.fa"
print "\ncreating %s..." % (libpath)
if libs[l][1] == "fq":
_ParseFq(libpath_tmp, libpath)
format = "fa"
if libs[l][1] == "fa":
_ParseFa(libpath_tmp, libpath)
format = "fa"
# For R0
root0 = sys.argv[1] + "/Results/r0/"
print "\n*** Performing mapping R0"
_Round0(bowtie_path, mainref_path, libname, libpath, format, filter, filterref_path, root0)
# For R1
root1 = sys.argv[1] + "/Results/r1/"
print "\n*** Performing mapping R1"
_Round1(bowtie_path, mainref_path, libname, libpath, format, 1, root1, root0)
# For ADD3
if add3_ == "yes":
root3 = sys.argv[1] + "/Results/M3/"
add3 = "3"
print "\n*** Performing mapping M3"
for i in range(round3):
n = i + 1
if n == 0:
next,
if n == 1:
next,
else:
_RoundN(add3, bowtie_path, mainref_path, libname, libpath, format, n, root3, root1)
# For ADD5
if add5_ == "yes":
root5 = sys.argv[1] + "/Results/M5/"
add5 = "5"
print "\n*** Performing mapping M5"
for i in range(round5):
n = i+1
if n == 0:
next
if n == 1:
next
else:
_RoundN(add5, bowtie_path, mainref_path, libname, libpath, format, n, root5, root1)
#Perform freq tab r0:
print "\n*** Getting frequences for r0"
outListr0 = []
rnList0 = []
roundr0 = 0
SAMr0 = SAMlist(roundr0, libs, 0)
SAMlistr0 = SAMr0._getSAMlist()
for rn in SAMlistr0:
rnList0.append(rn)
samlistr0 = SAMlistr0[rn]
nlibs = len(SAMlistr0[rn])
out = sys.argv[1] + "/Results/r0/RawResults/"+rn+".txt"
outListr0.append(out)
header0 = _freqSAM(samlistr0, nlibs, rn, out, 0)
#Applying cutoffs R0
cutoffList = []
cutoffd = dict()
for co in cutoffs:
if co == "no":
print "\tThere is no cutoff to applying for R0"
else:
print "Applying frequencing cutoff of %s reads:" % (co)
cutoffd[co] = dict()
r = "r0"
for r in rnList0:
cutoffd[co][r] = dict()
print "\tFor %s..." % (r)
input = sys.argv[1] + "/Results/r0/RawResults/"+r+".txt"
cutofftemp = _appCutoff(input, r, co, root0)
cutoffd[co][r]["r0"] = cutofftemp
cutoffList.append(cutofftemp)
#Perform freq tab r1:
print "\n*** Getting frequences for r1"
outListr1 = []
rnList1 = []
roundr1 = 1
SAMr1 = SAMlist(roundr1, libs, 1)
SAMlistr1 = SAMr1._getSAMlist()
Tablist = []
for rn in SAMlistr1:
rnList1.append(rn)
samlistr1 = SAMlistr1[rn]
nlibs = len(SAMlistr1[rn])
out = sys.argv[1] + "/Results/r1/RawResults/"+rn+".txt"
outListr1.append(out)
header0 = _freqSAM(samlistr1, nlibs, rn, out, 1)
#Applying cutoffs R1
cutoffList = []
for co in cutoffs:
if co == "no":
print "\tThere is no cutoff to applying for R0"
else:
print "Applying frequencing cutoff of %s reads:" % (co)
r = "r0"
for r in rnList1:
cutoffd[co][r] = dict()
print "\tFor %s..." % (r)
input = sys.argv[1] + "/Results/r1/RawResults/"+r+".txt"
cutofftemp = _appCutoff(input, r, co, root1)
cutoffd[co][r]["r1"] = cutofftemp
cutoffList.append(cutofftemp)
#Perform freq tab add3:
if add3_ == "yes":
print "\n*** Getting frequences for M3"
outListM3= []
rnList3 = []
SAM3 = SAMlist(round3, libs, 3)
SAMlist3 = SAM3._getSAMlist()
for rn in SAMlist3:
rnList3.append(rn)
samlist3 = SAMlist3[rn]
nlibs = len(SAMlist3[rn])
out = sys.argv[1] + "/Results/M3/RawResults/"+rn+".txt"
outListM3.append(out)
header3add = _freqSAM(samlist3, nlibs, rn, out, 3)
#Applying cutoffs add3
cutoffList = []
for co in cutoffs:
if co == "no":
print "\tThere is no cutoff to applying for M3"
else:
print "Applying frequencing cutoff of %s reads:" % (co)
r = "r0"
for r in rnList3:
if r in cutoffd[co]:
next,
else:
cutoffd[co][r] = dict()
print "\tFor %s..." % (r)
input = sys.argv[1] + "/Results/M3/RawResults/"+r+".txt"
cutofftemp = _appCutoff(input, r, co, root3)
cutoffd[co][r]["M3"] = cutofftemp
cutoffList.append(cutofftemp)
#Perform freq tab add5:
if add5_ == "yes":
print "\n*** Getting frequences for M5"
outListM5= []
rnList5 = []
SAM5 = SAMlist(round5, libs, 5)
SAMlist5 = SAM5._getSAMlist()
for rn in SAMlist5:
rnList5.append(rn)
samlist5 = SAMlist5[rn]
nlibs = len(SAMlist5[rn])
out = sys.argv[1] + "/Results/M5/RawResults/"+rn+".txt"
outListM5.append(out)
header5add = _freqSAM(samlist5, nlibs, rn, out, 5)
#Applying cutoffs add5
cutoffList = []
for co in cutoffs:
if co == "no":
print "\tThere is no cutoff to applying for M5"
else:
print "Applying frequencing cutoff of %s reads:" % (co)
r = "r0"
for r in rnList5:
if r in cutoffd[co]:
next,
else:
cutoffd[co][r] = dict()
print "\tFor %s..." % (r)
input = sys.argv[1] + "/Results/M5/RawResults/"+r+".txt"
cutofftemp = _appCutoff(input, r, co, root5)
cutoffd[co][r]["M5"] = cutofftemp
cutoffList.append(cutofftemp)
#Applying Filter
if mirnaref[0] == "no":
ref_dic = _GetRef(mainref_path)
else:
ref_dic = _GetRef(mainref_path)
mirna_path = mirnaref[1]
_Round_mir(bowtie_path, mainref_path, mirna_path, root0)
mature_dic = _mature()
##Creating MapResults
print "\nCreating MapResults"
lib_dic = dict()
for r in outListr0:
lib_dic, cab = _ParseRn(r, lib_dic)
_cleanDIRS("r0")
for r in outListr1:
lib_dic, cab = _ParseRn(r, lib_dic)
_cleanDIRS("r1")
if add3_ == "yes":
for r in outListM3:
lib_dic, cab = _ParseRn(r, lib_dic)
_cleanDIRS("M3")
if add5_ == "yes":
for r in outListM5:
lib_dic, cab = _ParseRn(r, lib_dic)
_cleanDIRS("M5")
out = sys.argv[1] + "/Results/MapResults/All_MapResults.txt"
_buildout(ref_dic, lib_dic, mature_dic, cab, out)
for c in cutoffd:
print "\nCreating MapResults with %s reads Cutoff" % (c)
lib_dic2 = dict()
out = sys.argv[1] + "/Results/MapResults/cutoff_%s_MapResults.txt" % (c)
for r in cutoffd[c]:
for i in cutoffd[c][r]:
#print "\tGetting frequences for %s" % (cutoffd[c][r][i])
lib_dic2, cab = _ParseRn(cutoffd[c][r][i], lib_dic2)
_buildout(ref_dic, lib_dic2, mature_dic, cab, out)
if __name__=="__main__":
print "\n Start Running isomiRID Version 0.53\n"
if "-f" in sys.argv or "--force" in sys.argv:
_cleanALLDIRS()
_Complete_analysis()
else:
_Complete_analysis()
```
#### File: software_scripts/dnapilib/exhaust.py
```python
import re
import os.path
import subprocess
import fileinput
from dnapilib.io_utils import get_file_obj
from dnapilib.io_utils import fastq_sequence
from dnapilib.io_utils import fastq_record
def rm_temp_dir(temp_dir):
"""Remove temporary directory.
"""
if temp_dir:
if os.path.exists(temp_dir):
subprocess.call("rm -r {}".format(temp_dir).split())
def clip_adapter(fp, aseed, tm5, tm3, min_len, max_len):
"""Return adapter-clipped clean reads.
"""
seed_len = len(aseed)
pp = re.compile("(.*)"+aseed, re.IGNORECASE)
for seq in fastq_sequence(fp):
if len(seq) < tm5 or len(seq) < tm3:
raise Exception("trimming length is too large")
match = pp.search(seq)
if not match:
continue
end = match.end() - seed_len
clipped_seq = seq[tm5 : end-tm3]
L = len(clipped_seq)
if min_len <= L and L <= max_len:
yield clipped_seq
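# Worked example (hypothetical read): with aseed="TGGAATTC", tm5=1, tm3=2,
# min_len=16 and max_len=30, the pattern "(.*)TGGAATTC" anchors on the last
# occurrence of the seed, `end` marks the index where that seed starts, and
# the yielded insert is seq[1:end-2] - the read minus 1 base at the 5' end,
# 2 bases at the 3' end, and everything from the adapter seed onward -
# provided the remaining length falls inside [min_len, max_len].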
def to_fasta(fastq, fasta, aseed, tm5, tm3, min_len, max_len):
"""Write FASTA containing clean reads, and return
the number of reads.
"""
fq_obj = get_file_obj(fastq)
if "RAW_INPUT".startswith(aseed):
iterator = fastq_sequence(fq_obj)
else:
iterator = clip_adapter(fq_obj, aseed, tm5, tm3, min_len, max_len)
fas = {}
clean_read_count = 0
for seq in iterator:
fas[seq] = fas.get(seq, 0) + 1
fa_obj = open(fasta, "w")
for seq, cnt in fas.items():
clean_read_count += cnt
fa_obj.write(">{0}_{1}\n{0}\n".format(seq, cnt))
fa_obj.close()
fq_obj.close()
return clean_read_count
def fastq_input_prep(fastq, ratio, temp_dir):
"""Write FASTQ in the temporary directory, and retrun
(subsampled) FASTQ name, the total read count,
standard deviation of read lengths.
"""
num = int(1/ratio)
read_count = 0.0
stats = {}
fq_out = "{}/input.fq".format(temp_dir)
fq_obj = get_file_obj(fastq)
fout = open(fq_out, "w")
for i, rec in enumerate(fastq_record(fq_obj)):
if i % num == 0:
fout.write(rec)
read_count += 1
L = len(rec.split("\n")[1])
stats[L] = stats.get(L,0) + 1
fout.close()
fq_obj.close()
mean = sum([L*c for L,c in stats.items()]) / read_count
sum_square = sum([(L-mean)**2 * c for L,c in stats.items()])
sd = (sum_square / read_count)**0.5
return fq_out, read_count, sd
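# Subsampling sketch: with ratio=0.25, num=4, so every 4th FASTQ record is
# written to the temporary file; the mean and standard deviation of read
# lengths are then computed from the {length: count} histogram accumulated
# over the sampled records.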
def count_mapped_read_sam(samout):
"""Return the number of mapped reads to the genome.
"""
if not os.path.exists(samout):
raise Exception("can't open SAM")
mapped = set()
for x in fileinput.input(samout):
if not x or x.startswith("@"):
continue
x = x.rstrip().split("\t")
if x[2] != '*':
mapped.add(x[0])
cnt = sum([int(n.split('_')[1]) for n in mapped])
return cnt
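# Note: read names in the SAM file are expected to follow the
# "<sequence>_<count>" convention written by to_fasta() above, so
# n.split('_')[1] recovers the collapsed read count of each unique sequence.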
def map_clean_reads(fastq, adapter, tm5, tm3,
min_len, max_len, map_command, temp_dir):
"""Execute mapping command, and return the numbers
of clean and mapped reads.
"""
fasta = "{0}/insert_{1}.fa".format(temp_dir, adapter)
samout = "{}/output.sam".format(temp_dir)
clipped = to_fasta(fastq, fasta, adapter, tm5, tm3, min_len, max_len)
map_command = map_command.replace("@in",fasta).replace("@out",samout)
map_command += " 2> /dev/null"
if subprocess.call(map_command, shell=True) != 0:
raise Exception("mapping failed, check command line")
mapped = count_mapped_read_sam(samout)
return clipped, mapped
def make_stats_report(table, sampled_read, subsample_rate, prefix_match,
sd, fastq, output_dir, temp_dir, no_output_files):
"""Report read statistics with predicted adapters.
"""
out = ["# sampled_reads={} (total_reads * {:.2f})".format(
int(sampled_read), subsample_rate)]
out.append("\t".join([
"# 3'adapter",
"reads_extracted",
"(reads_extracted/sampled_reads)%",
"reads_mapped",
"(reads_mapped/sampled_reads)%",
"params_k:r"]))
max_mapped_read = -1
max_index = -1
for i, x in enumerate(table):
if x[3] > max_mapped_read:
max_mapped_read = x[3]
max_index = i
out.append("{}\t{}\t{:.2f}\t{}\t{:.2f}\t{}".format(*x))
optimal = [table[max_index][0]]
fq_prefix = os.path.basename(fastq).split(".")[0]
if table[max_index][4] < 20:
optimal.append("/POOR_QUALITY")
if optimal[0] == "RAW_INPUT":
if sd:
out.append("# input reads look already clean!")
else:
optimal.append("?")
else:
if no_output_files:
pass
else:
if not os.path.exists(output_dir):
subprocess.call("mkdir {}".format(output_dir).split())
aseq = optimal[0][:prefix_match]
fa_tmp = "{}/insert_{}.fa".format(temp_dir, aseq)
fa_out = "{}/{}_{}.fa".format(output_dir, fq_prefix, aseq)
subprocess.call(("mv {} {}".format(fa_tmp,fa_out)).split())
out.insert(0, "optimal_3'adapter={}\n".format(''.join(optimal)))
report = "\n".join(out)
print(report)
if not no_output_files:
f = open("{}/{}_report.txt".format(output_dir, fq_prefix), "w")
f.write(report + "\n")
f.close()
```
#### File: sRNA_analysis_Maize/software_scripts/phasdetect.py
```python
import os,sys,subprocess,multiprocessing,time,getpass,shutil,hashlib,datetime,collections,re,argparse
from importlib.machinery import SourceFileLoader
from multiprocessing import Process, Queue, Pool
from subprocess import check_output
import os.path
from os.path import expanduser
# from dedup import dedup_main,dedup_process,dedup_fastatolist,deduplicate,dedup_writer
#### USER SETTINGS ########################################
GenomeIn = str(sys.argv[1])
outname = str(sys.argv[2])
PhaseIn = int(sys.argv[3])
### Settings file
setFile = "phasis.set"
memFile = "phasis.mem"
res_folder = "%s_%s_phased" % (outname, PhaseIn) #(datetime.datetime.now().strftime("%m_%d_%H_%M"))
home = expanduser("~")
phaster_path = "%s/.phasis" % (home)
## Degradome - Optional ####################################
deg = 'N' ## Use Degradome validation, IF yes enter PARE db in line below
PARE = 'GuturGu' ## If deg = 'Y' then File for degradome analysis
## ADVANCED SETTINGS #######################################
cores = 0 ## 0: Most cores considered as processor pool | 1-INTEGER: Cores to be considered for pool
# nthread = 3 ## Threads perprocess
# server = "tarkan.ddpsc.org" ## Server to use to fetch library information and smallRNA libraries
# perl = "/usr/local/bin/perl_5.18" ## Josh updated the perl on Tarkan and its not ready yet for PHAS script FORK is missing and somemore modules -Check with Pingchuan help
perl = "perl"
Local = 3 ## [0]: Files in directory [2]: Get the libs from $ALLDATA with raw reads
## [3] Get library from srna db with reads filtered on number of hits
noiseLimit = 2
hitsLimit = 10
#############################################################
#############################################################
#parser = argparse.ArgumentParser()
#parser.add_argument('--lowmem', action='store_true', default=True, help=
# 'Flag to reduce memory usage for large genomes. Using this flag'\
# 'will increase the runtime for phaser')
#args = parser.parse_args()
def checkUser():
'''
Checks if user is authorized to use script
'''
print ("\n#### Checking user ###########################")
auser = getpass.getuser()
print("Hello '%s' - Please report issues at: https://github.com/atulkakrana/PHASIS/issues" % (auser))
# if auser in allowedUser:
# print("Hello '%s' - Issues need to be reproted: https://github.com/atulkakrana/phasTER/issues \n" % (auser))
# else:
# print("YOU ARE NOT AUTHORIZED TO USE DEVELOPMENTAL VERSION OF 'PHASER'")
# print("Contact '<NAME>' at <EMAIL> for permission\n")
# sys.exit()
return None
def checkHost(allowedHost):
'''
Checks if phaster is allowed on this server
'''
print ("#### Pre-run checks ###########################")
f = subprocess.Popen("hostname", stdout=subprocess.PIPE,shell= True)
output,err = f.communicate()
#print (output.decode("ascii"))
host = output.decode("ascii")
print ('--Current host:',host.strip('\n'))
## Do not turn OFF this 'for' loop as that gives an error while matching the current host with allowedHost - Reason Unknown
# print ('Allowed Hosts:')
# for host in allowedHost:
# print (host)
print("--Allowed hosts: %s" % (','.join(x for x in allowedHost)))
if str(host.strip('\n')) in allowedHost:
print("--PHASIS is supported on this server - good to go!!!\n")
pass
else:
print("--PHASIS is not tested on this server")
print("--Run your analysis at any of these servers:%s" % (','.join(x for x in allowedHost)))
print("--Script will exit now\n")
sys.exit()
return None
def checkDependency():
'''Checks for required components on user system'''
print("\n#### Fn: checkLibs ###########################")
goSignal = True ### Signal to process is set to true
### Check PYTHON version
pythonver = sys.version_info[0]
if int(pythonver) >= 3:
print("--Python v3.0 or higher : found")
pass
else:
print("--Python v3.0 or higher : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check PERL version
# perlver = os.system("perl -e 'print $];' &> /dev/null")
aninfo = check_output(["perl", "-v"]).decode("utf-8")
aninfo2 = aninfo.split('\n')[1].split('(')[1].split(')')[0].rsplit('.',1)[0]
perlver = aninfo2[1:] ## Remove 'v' before version
if float(perlver) >= 5.014:
print("--Perl v5.14 or higher : found")
pass
else:
print("--Perl v5.14 or higher : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check BOWTIE
isbowtie = shutil.which("bowtie")
if isbowtie:
print("--Bowtie (v1) : found")
pass
else:
print("--Bowtie (v1) : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check Perl dependencies
retcode = os.system("perl -MScalar::Util -e1 &> /dev/null")
if retcode == 0:
print("--Scalar::Util (perl) : found")
pass
else:
print("--Scalar::Util (perl) : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check Perl dependencies
retcode = os.system("perl -MData::Dumper -e1 &> /dev/null")
if retcode == 0:
print("--Data::Dumper (perl) : found")
pass
else:
print("--Data::Dumper (perl) : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check Perl dependencies
retcode = os.system("perl -MParallel::ForkManager -e1 &> /dev/null")
if retcode == 0:
print("--Parallel::ForkManager (perl) : found")
pass
else:
print("--Parallel::ForkManager (perl) : missing")
goSignal = False
# print("See README for how to INSTALL")
### Check Perl dependencies
retcode = os.system("perl -MGetopt::Long -e1 &> /dev/null")
if retcode == 0:
print("--Getopt::Long (perl) : found")
pass
else:
print("--Getopt::Long (perl) : missing")
goSignal = False
# print("See README for how to INSTALL")
if goSignal == False:
print("\n** Please install the missing libraries before running the analyses")
# print("See README for how to install these")
print("** revFerno has unmet dependendies and will exit for now\n")
sys.exit()
return None
def readSet(setFile):
'''
Read and parse external settings file
'''
global runType
runType = "G"
global reference
reference = GenomeIn + ".fa"
global index
index = GenomeIn
global libs
libs = [sys.argv[4]]
global libFormat
libFormat = "F"
global phase
phase = PhaseIn
global minDepth
minDepth = 3
global clustBuffer
clustBuffer = 300
# if os.path.isfile(setFile):
# pass
# else:
# print("---Settings file 'phasis.set' not found in current directory")
# print("---Please copy it to same directory as script and rerun")
# sys.exit()
# print("\n#### Fn: Settings Reader #####################")
# fh_in = open(setFile, 'r', encoding='utf-8')
# setFile = fh_in.readlines()
# fh_in.close()
# for line in setFile:
# if line: ## Not empty
# if line.startswith('@'):
# line = line.strip("\n")
# # print(line)
# akey,aval = line.split('=')
# param = akey.strip()
# value = aval.strip()
# # print(param,value)
# ### Extract values #########
# if param.strip() == '@runType':
# global runType
# runType = "G" #str(value.strip())1111
# if (runType != "G") and (runType != "T") and (runType != "S"):
# print("Please input correct setting for '@runType' parameter in 'phasis.set' file")
# print("Script will exit for now\n")
# sys.exit()
# else:
# print('User Input runType :',runType)
# elif param.strip() == '@reference':
# global reference
# reference = GenomeIn + ".fa" #str(value.strip())2222
# print('User Input reference location :',reference)
# elif param.strip() == '@index':
# global index
# index = GenomeIn #str(value.strip())3333
# if index:
# print('User Input index location :',index)
# else:
# print('User Input index location : None')
# elif param.strip() == '@userLibs':
# global libs
# # libs = list(map(str,value.strip().split(',')))
# libs = [SeqIn] #[str(x) for x in value.strip().split(',') if x.strip() != '' ] ## This is my dope...4444
# print('User Input Libs :',",".join(libs))
# elif param.strip() == '@libFormat':
# global libFormat
# libFormat = "F" #str(value.strip()) 5555
# if (libFormat != "T") and (libFormat != "F"):
# print("Please input correct setting for '@libFormat' parameter in 'phasis.set' file")
# print("Script will exit for now\n")
# sys.exit()
# else:
# print('user library format :',libFormat)
# elif param.strip() == '@phase':
# global phase
# phase = PhaseIn #int(value.strip())6666
# print('User Input for phase length :',phase)
# elif param.strip() == '@path_prepro_git':
# global phaster_path
# phaster_path = str(value.strip()).rstrip("/")+"/phaster"
# # phaster_path = str(value.strip()).rstrip("/")+"/core"
# print('User Input for phaster path :',phaster_path)
# elif param.strip() == '@minDepth':
# global minDepth
# minDepth = minDepthIn #int(value.strip())777
# if not minDepth:
# minDepth = 3
# print('User Input for min. sRNA depth :',minDepth)
# elif param.strip() == '@clustBuffer':
# global clustBuffer
# clustBuffer = ClassIn #int(value.strip())888
# if not clustBuffer:
# clustBuffer = 250
# print('User Input distance b/w clusters :',clustBuffer)
# # elif param.strip() == '@mismat':
# # global mismat
# # mismat = int(value.strip())
# # if not mismat:
# # mismat = 0
# # print('User Input for max mismatches :',mismat)
# else:
# #print("Missed line:",line)
# pass
# sys.exit()
return libs
def PHASBatch(con,libs,runType,index,deg):
'''
## Deprecated
'''
#os.mkdir('./%s' % (lib))
#output_path = './%s' % (lib)
for lib in libs:
print (lib)
cur = con.cursor()
cur.execute('SELECT processed_path FROM master.library_info where lib_id = %s' % (lib))
path = cur.fetchall()
#print(path[0][0])
pro_file = path[0][0].replace('$ALLDATA', '/alldata')###Processed sRNA file
out_file = '%s.txt' % (lib)
rl = str(phase)
nproc2 = str(nproc)
sRNAratio = str(75)
print (pro_file)
if runType == 'G': ### Uses Whole genome as input
if deg == 'Y':
retcode = subprocess.call([perl, "/data2/homes/kakrana/svn/users/kakrana/phasiRNA_prediction_pipeline.ver.genome.pl", "-i", pro_file, "-q", PARE, "-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nproc2])
else:
retcode = subprocess.call([perl, "/data2/homes/kakrana/svn/users/kakrana/phasiRNA_prediction_pipeline.ver.genome.pl", "-i", pro_file,"-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nproc2])
else: ### Uses FASTA file of genes as input
#pipe =subprocess.Popen(["perl5.18", "-v"])
if deg == 'Y':
retcode = subprocess.call([perl, "/data2/homes/kakrana/svn/users/kakrana/phasiRNA_prediction_pipeline.ver.MUL.pl", "-i", pro_file, "-q", PARE, "-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nproc2])
else:
retcode = subprocess.call([perl, "/data2/homes/kakrana/svn/users/kakrana/phasiRNA_prediction_pipeline.ver.MUL.pl", "-i", pro_file, "-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nproc2])
if retcode == 0:
pass
else:
print("Problem with Phasing script - Return code not 0")
sys.exit()
return lib
def TagAbundanceFile(con,db,libs):
'''
### sRNA Libraries are fetched from server
'''
for alib in libs:##For all the libraries
## Check if file already exsits in directory - This saves a lot of time downloading the same file
filePath = '%s.fas' % (alib)
if os.path.isfile(filePath) == False:
print ('\nPreparing sRNA reads file for library: %s' % (alib[0]))
#print (lib[0])
#print ('Caching tag and count information from server for PARE alib %s' % (alib[0]) )
cur = con.cursor()
cur.execute("SELECT tag,norm from %s.run_master where lib_id = %s AND (hits between 0 and 20)" % (db,alib[0]))
lib_info = cur.fetchall()
#print('These are the tags:',lib_info[:10])
fh_out = open('%s.fas' % (alib), 'w')##Naming file with lib_ids name
print ('Library cached, writing abundance file')
tag_num = 1
for ent in lib_info:## All the PARE tags in a library
#print (ent)
fh_out.write('%s\t%s\n' % (ent[0],ent[1]))
tag_num += 1
fh_out.close()
else:
print('tag abundance file exists for library: %s' % (alib))
pass
def PHASBatch2(aninput):
'''
Phasing analysis - New
'''
print ("\n#### Fn: phaser #############################")
# print("\naninput\n",aninput)
lib,runType,index,deg,nthread,noiseLimit,hitsLimit,clustBuffer = aninput
### Sanity check #####################
if not os.path.isfile(lib):
print("** %s - sRNA library file not found" % (lib))
print("** Please check the library- Is it in specified directory? Did you input wrong name?")
print("** Script will exit for now\n")
sys.exit()
else:
print("sRNA library located - Running phasing analysis")
pass
#####################################
pro_file = lib ### sRNA input file
out_file = '%s/%s.txt' % (res_folder,os.path.basename(lib).rpartition(".")[0]) ## Output file suffix
rl = str(phase)
# nproc2 = str(nproc)
nthread = str(nthread)
sRNAratio = str(75)
noiseLimit = str(minDepth-1)
# mismat = str(mismat)
clustBuffer = str(clustBuffer)
print(pro_file)
if runType == 'G':### Uses Whole genome as input
full_path = "%s/phasclust.genome.v2.pl" % (phaster_path)
# print(full_path)
if deg == 'Y':
retcode = subprocess.call([perl, "%s/phasclust.genome.v2.pl" % (phaster_path), "-i", pro_file, "-q", PARE, "-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
else:
if libFormat == "T":
aformat = "t"
retcode = subprocess.call([perl, "%s/phasclust.genome.v2.pl" % (phaster_path), "-i", pro_file,"-f", aformat, "-t", sRNAratio,"-n", noiseLimit, "-g", clustBuffer, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
elif libFormat == "F":
aformat = "f"
retcode = subprocess.call([perl, "%s/phasclust.genome.v2.pl" % (phaster_path), "-i", pro_file,"-f", aformat, "-t", sRNAratio,"-n", noiseLimit, "-g", clustBuffer, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
else:
print("** Invalid '@libFormat' parameter value")
print("** Please check the '@libFormat' parameter value in setting file")
print("** F for FASTA format | T for tag-count format are the only acceptable values")
print("** Script will exit now")
sys.exit()
else: ### Uses FASTA file of genes as input
full_path = "%s/phasclust.MUL.v2.pl" % (phaster_path)
# print(full_path)
if deg == 'Y':
retcode = subprocess.call([perl, "%s/phasclust.MUL.v2.pl" % (phaster_path), "-i", pro_file, "-q", PARE, "-f", "-t", sRNAratio, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
else:
if libFormat == "T":
aformat = "t"
retcode = subprocess.call([perl, "%s/phasclust.MUL.v2.pl" % (phaster_path), "-i", pro_file, "-f", aformat, "-t", sRNAratio,"-n", noiseLimit, "-g", clustBuffer, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
elif libFormat == "F":
aformat = "f"
retcode = subprocess.call([perl, "%s/phasclust.MUL.v2.pl" % (phaster_path), "-i", pro_file,"-f", aformat, "-t", sRNAratio,"-n", noiseLimit, "-g", clustBuffer, "-d", index, "-px", out_file, "-rl", rl, "-cpu", nthread])
else:
print("** Invalid '@libFormat' parameter value")
print("** Please check the '@libFormat' parameter value in setting file")
print("** F for FASTA format | T for tag-count format are the only acceptable values")
print("** Script will exit now")
sys.exit()
if retcode == 0:
pass
else:
print("** Problem with Phasing script - Return code not 0")
sys.exit()
return None
def PP(module,alist):
'''
Parallelizes a process without catching results
'''
start = time.time()
npool = Pool(int(nproc))
npool.map(module, alist)
def PPResults(module,alist):
'''
Parallelizes and stores result
'''
####
npool = Pool(int(nproc))
res = npool.map_async(module, alist)
results = (res.get())
npool.close()
return results
def PPBalance(module,alist):
'''
Balance process according to core pool
'''
#print('***********Parallel instance of %s is being executed*********' % (module))
start = time.time()
##PP is being used for Bowtie mappings - This will avoid overflooding of processes to server
nprocPP = 1 #round((nproc/int(nthread)))
if nprocPP < 1:
nprocPP = 1 ## 1 here so as to avoid 0 processor being allocated in serial mode
else:
pass
print("nprocPP : %s" % (nprocPP))
npool = Pool(4)
npool.map(module, alist)
def optimize(nproc):
'''
dirty optimization of threads per library
'''
nlibs = len(libs)
ninstances = int(nproc/2) ### Number of parallel instances to use
# print("Libs:%s | nproc:%s | ninstance:%s" % (nlibs,nproc,ninstances))
if ninstances > 3:
nthread = ninstances
else:
nthread = 3
print("\n#### %s computing core(s) reserved for analysis ##########" % (str(nproc)))
print("#### %s computing core(s) assigned to one lib ############\n" % (str(nthread)))
# time.sleep(1)
return nthread
def inputList(libs,runType,index,deg,nthread,noiseLimit,hitsLimit,clustBuffer):
'''generate raw inputs for parallel processing'''
rawInputs = [] ## An empty list to store inputs for PP
for alib in libs:
rawInputs.append((alib,runType,index,deg,nthread,noiseLimit,hitsLimit,clustBuffer))
# print("These are rawInputs:",rawInputs)
return rawInputs
def indexBuilder(reference):
'''
Generic index building module
'''
print ("\n#### Fn: indexBuilder #######################")
### Sanity check #####################
if not os.path.isfile(reference):
print("'%s' reference file not found" % (reference))
print("Please check the genomeFile - Is it in specified directory? Did you input wrong name?")
print("Script will exit for now\n")
sys.exit()
else:
print("Reference file located - Preparing to create index")
pass
#####################################
### Clean reference ################
fastaclean,fastasumm = FASTAClean(reference,0)
### Prepare Index ##################
print ("**Deleting old index 'folder' !!!!!!!!!!!**")
shutil.rmtree('./index', ignore_errors=True)
os.mkdir('./index')
genoIndex = '%s/index/%s' % (os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Can be merged with genoIndex from earlier part if we use bowtie2 earlier
# genoIndex = './index/%s' % (fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Alternative approach -Can be merged with genoIndex from earlier part if we use bowtie2 earlier
print('Creating index of cDNA/genomic sequences:%s**\n' % (genoIndex))
adcv = "256"
divn = "6"
### Run based on input about the memory
# if args.lowmem:
# retcode = subprocess.call(["bowtie-build","-f", fastaclean, genoIndex])
# else:
retcode = subprocess.call(["bowtie-build","-f", "--noauto", "--dcv", adcv,"--bmaxdivn", divn, fastaclean, genoIndex])
if retcode == 0:## The bowtie mapping exit with status 0, all is well
# print("Reference index prepared sucessfully")
pass
else:
print("There is some problem preparing index of reference '%s'" % (reference))
print("Is 'Bowtie' installed? And added to environment variable?")
print("Script will exit now")
sys.exit()
##########################################
## Test for index files #################
# Close this code if not testing
# fh_in1 = open("./index/Triticum_aestivum.TGACv1.dna.toplevel.clean.1.ebwtl",'w')
# fh_in1.write("Atul is a developer for PHASIS")
# fh_in1.close()
##########################################
### Make a memory file ###################
fh_out = open(memFile,'w')
# print("Generating MD5 hash for reference")
refHash = (hashlib.md5(open('%s' % (reference),'rb').read()).hexdigest()) ### reference hash used instead of cleaned FASTA because while comparing only the user input reference is available
print("Generating MD5 hash for Bowtie index")
if os.path.isfile("%s.1.ebwtl" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwtl' % (genoIndex),'rb').read()).hexdigest())
elif os.path.isfile("%s.1.ebwt" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwt' % (genoIndex),'rb').read()).hexdigest())
else:
print("File extension for index couldn't be determined properly")
print("It could be an issue from Bowtie")
print("This needs to be reported to 'PHASIS' developer - Script will exit")
sys.exit()
print("\n@genomehash:%s | @indexhash:%s" % (refHash, indexHash) )
fh_out.write("@timestamp:%s\n" % (datetime.datetime.now().strftime("%m_%d_%H_%M")))
fh_out.write("@genomehash:%s\n" % (refHash))
fh_out.write("@index:%s\n" % (genoIndex))
fh_out.write("@indexhash:%s\n" % (indexHash))
print("Index prepared:%s\n" % (genoIndex))
# sys.exit()
return genoIndex
def indexBuilder2(reference,fastaclean):
'''
Prepared to work with parallelized version of FASTA cleaner - Not implemented yet - because parallel FASTA
cleaner is slow on bigger genomes - needs troubleshooting
'''
print ("\n#### Fn: indexBuilder #######################")
### Prepare Index ##################
print ("**Deleting old index 'folder' !!!!!!!!!!!**")
shutil.rmtree('./index', ignore_errors=True)
os.mkdir('./index')
genoIndex = '%s/index/%s' % (os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Can be merged with genoIndex from earlier part if we use bowtie2 earlier
# genoIndex = './index/%s' % (fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Alternative approach -Can be merged with genoIndex from earlier part if we use bowtie2 earlier
print('Creating index of cDNA/genomic sequences:%s**\n' % (genoIndex))
adcv = "256"
divn = "6"
### Run based on input about the memory
# if args.lowmem:
# retcode = subprocess.call(["bowtie-build","-f", fastaclean, genoIndex])
# else:
retcode = subprocess.call(["bowtie-build","-f", "--noauto", "--dcv", adcv,"--bmaxdivn", divn, fastaclean, genoIndex])
if retcode == 0:## The bowtie mapping exit with status 0, all is well
# print("Reference index prepared sucessfully")
pass
else:
print("There is some problem preparing index of reference '%s'" % (reference))
print("Is 'Bowtie' installed? And added to environment variable?")
print("Script will exit now")
sys.exit()
##########################################
### Test for index files #################
# ## Close this code if not testing
# fh_in1 = open("./index/Triticum_aestivum.TGACv1.dna.toplevel.clean.1.ebwtl",'w')
# fh_in1.write("Atul is a developer for PHASIS")
# fh_in1.close()
##########################################
### Make a memory file ###################
fh_out = open(memFile,'w')
print("Generating MD5 hash for reference")
refHash = (hashlib.md5(open('%s' % (reference),'rb').read()).hexdigest()) ### reference hash used instead of cleaned FASTA because while comparing only the user input reference is available
print("Generating MD5 hash for Bowtie index")
if os.path.isfile("%s.1.ebwtl" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwtl' % (genoIndex),'rb').read()).hexdigest())
elif os.path.isfile("%s.1.ebwt" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwt' % (genoIndex),'rb').read()).hexdigest())
else:
print("File extension for index couldn't be determined properly")
print("It could be an issue from Bowtie")
print("This needs to be reported to 'PHASIS' developer - Script will exit")
sys.exit()
print("\n@genomehash:%s | @indexhash:%s" % (refHash, indexHash) )
fh_out.write("@timestamp:%s\n" % (datetime.datetime.now().strftime("%m_%d_%H_%M")))
fh_out.write("@genomehash:%s\n" % (refHash))
fh_out.write("@index:%s\n" % (genoIndex))
fh_out.write("@indexhash:%s\n" % (indexHash))
print("Index prepared:%s\n" % (genoIndex))
# sys.exit()
return genoIndex
def indexIntegrityCheck(index):
'''
Checks the integrity of index and the extension
'''
indexFolder = index.rpartition("/")[0]
# print("This is the folder from earlier run:%s" % (indexFolder))
if os.path.isfile("%s.1.ebwtl" % (index)): ## Check if this extension exists in folder
indexExt = "ebwtl"
indexFiles = [i for i in os.listdir('%s' % (indexFolder)) if i.endswith('.ebwtl')]
if len(indexFiles) >= 6:
# print("Index has all six parts")
indexIntegrity = True
elif os.path.isfile("%s.1.ebwt" % (index)):
indexExt = "ebwt"
indexFiles = [i for i in os.listdir('%s' % (indexFolder)) if i.endswith('.ebwt')]
if len(indexFiles) >= 6:
# print("Index has all six parts")
indexIntegrity = True
else:
print("Existing index extension couldn't be determined")
print("Genome index will be remade")
indexExt = False
indexIntegrity = False
print("Ancillary data integrity :",indexIntegrity)
# print("Number of files:%s" % (len(indexFiles)))
return indexIntegrity,indexExt
def FASTAClean(filename,mode):
'''Cleans FASTA file - multi-line fasta to single line, header clean, empty lines removal'''
## Read seqeunce file
fh_in = open(filename, 'r')
print ("phasdetect uses FASTA header as key for identifying the phased loci")
print ("Caching '%s' reference FASTA file" % (filename))
## Write file
if mode == 0:
fastaclean = ('%s/%s.clean.fa' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0])) ## os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]
else:
print("Input correct mode- 0: Normal | 1: Seqeunces reversed | 2: Seqeunces reverse complemented | 3: Seqeunces complemented only")
print("USAGE: cleanFasta.v.x.x.py FASTAFILE MODE")
sys.exit()
### Outfiles
fh_out1 = open(fastaclean, 'w')
fastasumm = ('%s/%s.summ.txt' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0]))
fh_out2 = open(fastasumm, 'w')
fh_out2.write("Name\tLen\n")
### Read files
fasta = fh_in.read()
fasta_splt = fasta.split('>')
fastaD = {} ## Store FASTA as dict
acount = 0 ## count the number of entries
empty_count = 0
for i in fasta_splt[1:]:
ent = i.split('\n')
aname = ent[0].split()[0].strip()
if runType == 'G':
## To match with phasing-core script for genome version which removed non-numeric and preceding 0s
name = re.sub("[^0-9]", "", aname).lstrip('0')
else:
name = aname
seq = ''.join(x.strip() for x in ent[1:]) ## Sequence in multiple lines
alen = len(seq)
if alen > 200:
fh_out1.write('>%s\n%s\n' % (name,seq))
fh_out2.write('%s\t%s\n' % (name,alen))
acount+=1
else:
empty_count+=1
pass
#### Prepare a dictionary - Not Tested
# for line in fh_in:
# if line.startswith('>'):
# name = line[1:].rstrip('\n').split()[0]
# fastaD[name] = ''
# else:
# fastaD[name] += line.rstrip('\n').rstrip('*')
#### Write results - Not tested
# for name,seq in fastaD.items():
# alen = len(seq)
# if alen > 200:
# fh_out1.write('>%s\n%s\n' % (name,seq))
# fh_out2.write('%s\t%s\n' % (name,alen))
# acount+=1
# else:
# empty_count+=1
# pass
fh_in.close()
fh_out1.close()
fh_out2.close()
print("Fasta file with reduced header: '%s' with total entries %s is prepared" % (fastaclean, acount))
print("There were %s entries found with empty sequences and were removed\n" % (empty_count))
return fastaclean,fastasumm
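# Note on the cleaning contract: headers are reduced to their first
# whitespace-separated token (and, for genome runs, to the digits of that
# token with leading zeros stripped, to match the phasing-core naming), and
# any entry of 200 nt or shorter is dropped rather than indexed.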
def readMem(memFile):
'''
Reads memory file and gives global variables
'''
print ("#### Fn: memReader ############################")
fh_in = open(memFile,'r')
memRead = fh_in.readlines()
fh_in.close()
memflag = True
varcount = 0
for line in memRead:
if line: ## Not empty
if line.startswith('@'):
line = line.strip("\n")
# print(line)
akey,aval = line.split(':')
param = akey.strip()
value = aval.strip()
# print(param,value)
if param == '@genomehash':
global existRefHash
varcount+=1
existRefHash = str(value)
print('Existing reference hash :',existRefHash)
elif param == '@indexhash':
global existIndexHash
varcount+=1
existIndexHash = str(value)
print('Existing index hash :',existIndexHash)
elif param == '@index':
global index
varcount+=1
index = str(value)
print('Existing index location :',index)
else:
pass
## Sanity Check - Memory file is not empty, from a crash
# if existRefHash.strip() == '':
# memflag = False
# elif existIndexHash.strip() == '':
# memflag = False
# elif index.strip() == '':
# memflag = False
if varcount == 3:
memflag = True
else:
memflag = False
return memflag
def coreReserve(cores):
'''
Decides the core pool for the machine - written to make PHASIS compatible with machines that
have less than 10 cores - Will be improved in future
'''
if cores == 0:
## Automatic assignment of cores selected
totalcores = int(multiprocessing.cpu_count())
if totalcores == 4: ## For quad core system
nproc = 3
elif totalcores == 6: ## For hexa core system
nproc = 5
elif totalcores > 6 and totalcores <= 10: ## For octa core system and those with less than 10 cores
nproc = 7
else:
nproc = 10 #int(totalcores*0.9)
else:
## Reserve user specifed cores
nproc = int(cores)
return nproc
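# Example: cores=0 on an 8-core machine reserves 7 workers (automatic mode),
# while any non-zero value, e.g. cores=4, is used as-is.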
#### FASTA CLEAN P - IN DEV
def FASTAread(filename):
'''
Reads FASTA file into a list
'''
### Sanity check #####################
if not os.path.isfile(reference):
print("'%s' reference file not found" % (reference))
print("Please check the genomeFile - Is it in specified directory? Did you input wrong name?")
print("Script will exit for now\n")
sys.exit()
else:
print("Reference file located - Preparing to create index")
pass
#####################################
### Read seqeunce file ##############
fh_in = open(filename, 'r')
print ("phasdetect uses FASTA header as key for identifying the phased loci")
print ("Caching reference '%s' FASTA file" % (filename))
fasta = fh_in.read()
fasta_splt = fasta.split('>')
print("Cached FASTA file with %s entries" % (len(fasta_splt[1:])))
fh_in.close()
return fasta_splt[1:]
def FASTAclean(ent):
'''
Cleans one entry of FASTA file - multi-line fasta to single line, header clean, empty lines removal
'''
ent_splt = ent.split('\n')
aname = ent_splt[0].split()[0].strip()
# print("Cleaning - %s" % (aname))
if runType == 'G':
## To match with phasing-core script for genome version which removed non-numeric and preceding 0s
bname = re.sub("[^0-9]", "", aname).lstrip('0')
else:
bname = aname
bseq = ''.join(x.strip() for x in ent_splt[1:]) ## Sequence in multiple lines
return bname,bseq
def FASTAwrite(filename,alist,mode):
'''
Writes list of processed/cleaned FASTA
'''
## Write file
if mode == 0:
fastaclean = ('%s/%s.clean.fa' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0])) ## os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]
else:
print("Input correct mode- 0: Normal | 1: Seqeunces reversed | 2: Seqeunces reverse complemented | 3: Seqeunces complemented only")
print("USAGE: cleanFasta.v.x.x.py FASTAFILE MODE")
sys.exit()
### Outfiles
fh_out1 = open(fastaclean, 'w')
fastasumm = ('%s/%s.summ.txt' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0]))
fh_out2 = open(fastasumm, 'w')
fh_out2.write("Name\tLen\n")
acount = 0 ## count the number of entries
empty_count = 0 ## count empty entries
for ent in alist:
aname,aseq = ent
alen = len(aseq)
if alen > 200:
fh_out1.write('>%s\n%s\n' % (aname,aseq))
fh_out2.write('%s\t%s\n' % (aname,alen))
acount+=1
else:
empty_count+=1
pass
fh_out1.close()
fh_out2.close()
print("Fasta file with reduced header: '%s' with total entries %s is prepared" % (fastaclean, acount))
print("There were %s entries with empty/short sequences,these were removed\n" % (empty_count))
return fastaclean,fastasumm
#### DE-DUPLICATOR MODULES ####
def dedup_process(alib):
'''
To parallelize the process
'''
print("\n#### Fn: De-duplicater #######################")
afastaL = dedup_fastatolist(alib) ## Read
acounter = deduplicate(afastaL ) ## De-duplicate
countFile = dedup_writer(acounter,alib) ## Write
return countFile
def dedup_fastatolist(alib):
'''
New FASTA reader
'''
### Sanity check
try:
f = open(alib,'r')
except IOError:
print ("The file, %s, does not exist" % (alib))
return None
## Output
fastaL = [] ## List that holds FASTA tags
print("Reading FASTA file:%s" % (alib))
read_start = time.time()
acount = 0
empty_count = 0
for line in f:
if line.startswith('>'):
seq = ''
pass
else:
seq = line.rstrip('\n')
fastaL.append(seq)
acount += 1
read_end = time.time()
# print("-- Read time: %ss" % (str(round(read_end-read_start,2))))
print("Cached file: %s | Tags: %s | Empty headers: %ss" % (alib,acount,empty_count))
return fastaL
def deduplicate(afastaL):
'''
De-duplicates tags using multiple threads and libraries using multiple cores
'''
dedup_start = time.time()
# deList = [] ## Hold deduplicated tags and their abudnaces in a tuple
acounter = collections.Counter(afastaL)
dedup_end = time.time()
# print("-- dedup time: %ss" % (str(round(dedup_end-dedup_start,2))))
return acounter
def dedup_writer(acounter,alib):
'''
Writes tag counts to a file
'''
print("Writing counts file for %s" % (alib))
countFile = "%s.fas" % alib.rpartition('.')[0] ### Writing in de-duplicated FASTA format as required for phaster-core
fh_out = open(countFile,'w')
acount = 0
seqcount = 1 ## To name sequences
for i,j in acounter.items():
# fh_out.write("%s\t%s\n" % (i,j))
fh_out.write(">seq_%s|%s\n%s\n" % (seqcount,j,i))
acount += 1
seqcount += 1
print("Total unique entries written for %s: %s" % (alib,acount))
fh_out.close()
return countFile
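# Output format sketch: each unique tag becomes one FASTA record whose header
# carries a serial number and the tag's abundance (counts below are
# hypothetical), which is the de-duplicated input expected by phaster-core:
#   >seq_1|42
#   TCGGACCAGGCTTCATTCCCC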
#### MAIN ###################################################
#############################################################
def main(libs):
### Open the runlog
runLog = '%s_log_runtime_%s' % (outname, datetime.datetime.now().strftime("%m_%d_%H_%M"))
fh_run = open(runLog, 'w')
phaser_start = time.time()
### 0. Prepare index or reuse old #############
###############################################
## Did user provided its index? If Yes Skip making memory files
if not index:
### Check genome file and index
if not os.path.isfile(memFile):
print("This is first run - create index")
indexflag = False ## index will be made on fly
else:
memflag = readMem(memFile)
if memflag == False:
print("Memory file is empty - seems like previous run crashed")
print("Creating index")
indexflag = False ## index will be made on fly
elif memflag == True:
## valid memory file detected - use existing index
print("Generating MD5 hash for current reference file")
currentRefHash = hashlib.md5(open('%s' % (reference),'rb').read()).hexdigest()
print('Current reference hash :',currentRefHash)
#### Test #######
# if os.path.isdir(index.rpartition('/')[0]):
# print("There is a folder names 'index'")
# pass
# if currentRefHash == existRefHash:
# print("current ref. hash is same as exiting ref hash")
# pass
# sys.exit()
if currentRefHash == existRefHash:
# print("Current reference file matches with the earlier run")
indexIntegrity,indexExt = indexIntegrityCheck(index)
if indexIntegrity: ### os.path.isdir(index.rpartition('/')[0]):
print("Index status : Re-use")
genoIndex = index
indexflag = True
fh_run.write("Indexing Time: 0s\n")
else:
print("Index status : Re-make")
indexflag = False ## index will be made on fly
else:
## Different reference file - index will be remade
print("Index status : Re-make")
indexflag = False
print("Existing index does not matches specified genome - It will be recreated")
if indexflag == False:
## index will be remade
## original function - active
tstart = time.time()
genoIndex = indexBuilder(reference)
tend = time.time()
fh_run.write("Indexing Time:%ss\n" % (round(tend-tstart,2)))
# ## parallel function - not used - slow on large genomes like wheat due to I/O of data to different cores
# fastaL = FASTAread(reference)
# ## Test - Serial mode
# # cleanFasL = []
# # for ent in fastaL:
# # bname,bseq = FASTAclean(ent)
# # cleanFasL.append((bname,bseq))
# cleanFasL = PPResults(FASTAclean,fastaL)
# fastaclean,fastasumm = FASTAwrite(reference,cleanFasL,0)
# genoIndex = indexBuilder(reference)
# tend = time.time()
# fh_run.write("Indexing Time:%ss\n" % (round(tend-tstart,2)))
else:
genoIndex = index
if not (os.path.isfile("%s.1.ebwt" % (genoIndex)) or os.path.isfile("%s.1.ebwtl" % (genoIndex))) :
print("** %s - User specified index not found" % (genoIndex))
print("** Please check the value for @index parameter in settings file")
print("** Is it in specified directory? Did you input wrong name?")
print("** Script will exit for now\n")
sys.exit()
else:
print("Index status : User specified")
fh_run.write("Indexing Time: 0s\n")
pass
### 1. Make Folders ###########################
###############################################
shutil.rmtree("%s" % (res_folder),ignore_errors=True)
os.mkdir("%s" % (res_folder))
#### 2. File conversions#######################
###############################################
if libFormat == "F":
### Convert FASTA to Tagcount
### Sanity check
fh_in = open(libs[0],'r')
firstline = fh_in.readline()
if not firstline.startswith('>') and len(firstline.split('\t')) > 1:
print("** File doesn't seems to be in FASTA format")
print("** Please provide correct setting for @libFormat in 'phasis.set' settings file")
sys.exit()
else:
print("#### Converting FASTA format to counts #######")
dedup_start = time.time()
## TEST
# newList = []
# for alib in libs:
# aname = dedup_process(alib)
# newList.append(aname)
# libs = newList
libs = PPResults(dedup_process,libs)
# print('Converted libs: %s' % (libs))
dedup_end = time.time()
fh_run.write("FASTA conversion time:%ss\n" % (round(dedup_end-dedup_start,2)))
elif libFormat == "T":
### Can be used as-is - check that it really is in tag-count format
### Sanity check
fh_in = open(libs[0],'r')
firstline = fh_in.readline()
if firstline.startswith('>'):
print("** File seems tobe in FASTA format")
print("** Please provide correct setting for @libFormat in 'phasis.set' settings file")
sys.exit()
else:
# print("File seems to be in correct format")
pass
else:
print("** Please provide correct setting for @libFormat in 'phasis.set' settings file")
print("** If sRNA data is in tag count format use 'T' and for FASTA format use 'F' ")
sys.exit()
#### 3. Run Phaser ############################
###############################################
# print('These are the libs: %s' % (libs))
rawInputs = inputList(libs,runType,genoIndex,deg,nthread,noiseLimit,hitsLimit,clustBuffer)
# ### Test - Serial Mode
# for aninput in rawInputs:
# PHASBatch2(aninput)
#### Original - Parallel mode
PPBalance(PHASBatch2,rawInputs)
#### close runLog
phaser_end = time.time()
fh_run.write("Total analysis time:%ss\n" % (round(phaser_end-phaser_start,2)))
fh_run.close()
if __name__ == '__main__':
#### Cores to use for analysis
nproc = coreReserve(cores)
###############
checkUser()
checkDependency()
# checkHost(allowedHost)
global reference
libs = readSet(setFile)
nthread = optimize(nproc)
main(libs)
print('\n\n#### Phasing Analysis finished successfully')
print("#### Results are in folder: %s" % (res_folder))
print("#### 'phasmerge' can be run by command: python3 phasmerge -mode merge -dir %s\n" % (res_folder))
sys.exit()
########### CHANGE LOG ########
###############################
### Version 01 -> v02
### Added PARE switch
### Added sRNA ratio option
### Added option to specify libs
## v02 -> v03
## Added option to get libs from the server with hits filter
## Corrected bug in main(), replaced libs with userlibs for the specific-libraries part
## Perl location added as variable
## v03 -> v04
## Changed order of user settings to make them more clear
## Added functionality to check if the abundance file for library already exists in folder - Saves a lot of time
## v04 -> v05
## Added local mode to run on smallRNA files specified by user and present in current directory unlike fetching from DB or ALLDATA
## Simplified user settings
## v05 -> v06
## Added extension to sRNA library files for easy copying
## v06 -> v065
## Fixed regression introduced in v06
## v065 -> v07 [major][stable]
## Includes fix for filtering out tags with no hits, these are included now for libraries that have no genomes
## v070 -> v075 [major]
## Parallelization schema improved - now three libraries are parallelized together; only the analysis part is parallelized, not the downloading part
## v075 -> v080
## Changed nProc input from "Y" to 0
## Fixed bug from v075: if fethLibs = 'Y', user libs were still being used for raw inputs
## v08 -> v085
## moved scripts to svn/users/kakrana and updated the paths
## v085 -> v090
## Script run on local libraries
## Localization complete
## Added sanity checks
## Index made if change in genome detected, and reused if genome/reference is not changed
## v090 -> v095
## Added the phaster-core production/installed path
## v095 -> v099 [major]
## Added de-duplication functions to handle FASTA file, and convert to required format
## Modified FASTA clean module to implement a relatively faster method (should save a few minutes 2-4 depending on genome)
## Updated runTYPEs - G,T and S modes
## changed genoType to reference in settings reader
## Fixed a bug while checking for a previous index - the index file was being checked instead of the index directory; also added another md5 check to the loop
## Added dependency checks
## Updated script for updated version of phaster core files
## v0.99 - v1.0
## Updated the index builder function with optimized parameters. Now 6-8 minutes faster
## Added a argument to run on low memory
## v1.0 - v1.01
## Remade changes to indexBuilder module by copying the working version from v0.99. Not sure what went wrong in the v1.0
## v1.01 -> v1.02
## Fixed error where comma-separated @userlibs had an empty entry, like a stray comma at the end. Script doesn't end in that case
#### I thought it was because of many libraries being used and results not reported back i.e. a multiprocessing issue, but I was wrong
## Fixed summary file name, it was being written somewhere else randomly
## nthread parameter comes from the optimize function and need not be defined as a
#### static global variable
## v1.02 -> v1.03 [major]
## Added IF-loop to catch if index is an *.ebwtl (for big genomes) file before computing its hash
## Added an index integrity checker function to ensure that index file with all six parts exists
## Added a new function "coreReserve" for better assignment of cores for quad, hexa and octacore machines
## "phaser" renamed to "phasdetect"
## Added mindepth, clustbuffer, mismat parameters to phasworks.set for user tunability
## Default setting hardcoded if mindepth, clustbuffer, mismat left empty by users
## Added index extension to the indexIntegrity loop when no index is found; otherwise it gave an error at the return step
## v1.03 -> v1.04
## Updated phasemerge analysis statement
## v1.04 -> v1.05
## Added sanity checks to readset function
## v1.05 -> v1.06
## Renamed to PHASIS
## v1.06 -> v1.07
## Added check for memory file, if an empty one exists from earlier crash
## Fixed issue with index extension determination for big genomes
## organized the index builder call in main(). It is now called at one place
## TO-DO
## Add automatic index resolution
## Add functionality to share library folder, for 21-,24- and 22nt analysis
## Core-scripts usage
# Mandatory:
# -i small RNA input file name
# -f two file formats are supported, [t] for tagcount, [f] for fasta.
# -d indexed genome by bowtie, indicating the prefix of the genome
# -px prefix for each of the file, used to distinguish for eachother
# -rl register len(rl), such as 21 or 24, separated by comma: 21,24 etc
# optional:
# -k bowtie alignment hits report, default = all
# -q degradome or PARE data in ONLY tagcount format: seq abundance
# -m mismatches for both the sRNA and PARE/degradome alignment by bowtie, default = 0
# -p p-value in decimal number, default = 0.005;
# -cpu cpu numbers for the bowtie alignment
# -n noise, default = 1, for tags with abundance less than or equal to 1; increase the
# noise value appropriately for union datasets
# -g gap between two separate clusters, 300bp by default
# -t minimal proportion of small RNA abundance in the register of interest, default = 85
# -ht the maximal average of hits for the small RNAs of a certain cluster, default = 10
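# Example invocation sketch (hypothetical file names), combining the flags
# above the same way PHASBatch2() does for a genome run without degradome:
#   perl phasclust.genome.v2.pl -i libA.fas -f f -t 75 -n 2 -g 300 \
#        -d ./index/genome -px ./out/libA.txt -rl 21 -cpu 4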
``` |
{
"source": "1a11/grrr",
"score": 2
} |
#### File: bot/cogs/testModule.py
```python
from discord.ext import commands
import discord
import re
from loguru import logger
from .utils import checks # This will show an error in most IDEs; ignore it
class CommonSpam(commands.Cog):
"""
Fights against common spam that can plague discord servers
"""
def __init__(self, bot):
self.bot = bot
self._last_member = None
async def countEmojis(self, message: discord.Message):
count = len(re.findall(r"(:[A-Za-z0-9_]+:)", message.content)) # custom emoji tokens such as :smile:
if count == 0: # Test for Unicode Emojis
count = len(re.findall('(\u00a9|\u00ae|[\u2000-\u3300]|\ud83c[\ud000-\udfff]|'
'\ud83d[\ud000-\udfff]|\ud83e[\ud000-\udfff])', message.content))
return count
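# Example (hypothetical message): "nice :thumbsup: :thumbsup: party" counts the
# two ":thumbsup:" custom-emoji tokens via the first pattern; only when no
# ":name:" token is present does the Unicode-emoji pattern get used instead.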
@commands.Cog.listener()
async def on_message(self, message):
number_of_emojis = await self.countEmojis(message)
if number_of_emojis >= 3:
await message.delete()
channel = await message.author.create_dm()
await channel.send("Please do not spam")
@commands.command()
@checks.is_admin()
async def commonspam(self, ctx, setting = None):
"""
Turn on or off the command spam checks
:param ctx:
:param setting: on or off
:return:
"""
if setting is None:
await ctx.send("Please specify a setting! (on | off)")
else:
if setting.lower() == "on" or setting.lower() == "off":
pass
else:
await ctx.send("Please specify a *correct* setting! (on | off)")
def setup(bot):
bot.add_cog(CommonSpam(bot))
``` |
{
"source": "1a8e/img-org",
"score": 3
} |
#### File: img-org/src/driver.py
```python
import collections
import hashlib
import io
import os
import tqdm
import shutil
from PIL import Image, ExifTags, UnidentifiedImageError
from PIL.Image import DecompressionBombWarning
import warnings
warnings.filterwarnings("error")
Image.MAX_IMAGE_PIXELS = None
def get_image_size(img):
"""
Return the REAL size of the image, taking EXIF orientation into account
"""
# Let's get the EXIF information in the form of a nice dict:
# It will be something like: {'ResolutionUnit': 2, 'Orientation': 6, 'YCbCrPositioning': 1}
if img._getexif():
exif = {
ExifTags.TAGS[k]: v
for k, v in img._getexif().items()
if k in ExifTags.TAGS
}
else:
exif = {}
size = img.size
# If orientation is horizontal, lets swap width and height:
if exif.get("Orientation", 0) > 4:
size = (size[1], size[0])
return size, 'w' if size[0]/size[1] > 1 else 't'
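# Illustrative example (not from the original source): a photo stored as 4000x3000 pixels
# with EXIF Orientation=6 (rotated 90 degrees) is reported as ((3000, 4000), 't'),
# i.e. a tall/portrait image.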
src_dir = '/Users/cyan/workdir/Wall 3'
dst_dir = '/Users/cyan/workdir/Wall 4'
filenames = os.listdir(src_dir)
file_hash_name_map = collections.defaultdict(list)
for filename in tqdm.tqdm(sorted(filenames)):
with open(os.path.join(src_dir, filename), 'rb') as image_file:
try:
extension = filename.split('.')[-1]
read_image = image_file.read()
file_hash = hashlib.sha1(read_image).hexdigest()
image = Image.open(io.BytesIO(read_image))
size, ratio = get_image_size(image)
file_hash_name_map[f'{ratio}_{size[0]}_{file_hash}'].append(filename)
except (UnidentifiedImageError, DecompressionBombWarning) as err:
print(f'Skipping {filename}: {file_hash}')
continue
for file_hash, filenames in tqdm.tqdm(file_hash_name_map.items()):
shutil.copy(
os.path.join(src_dir, filenames[0]),
os.path.join(dst_dir, f'{file_hash}.{filenames[0].split(".")[-1]}')
)
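# Illustrative example (not from the original source): a unique landscape JPEG that is
# 3840 pixels wide is copied into dst_dir under a name like "w_3840_<sha1 hex digest>.jpg".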
``` |
{
"source": "1abner1/machin",
"score": 2
} |
#### File: examples/framework_examples/sac.py
```python
from machin.frame.algorithms import SAC
from machin.utils.logging import default_logger as logger
from torch.nn.functional import softplus
from torch.distributions import Normal
import torch as t
import torch.nn as nn
import gym
# configurations
env = gym.make("Pendulum-v0")
observe_dim = 3
action_dim = 1
action_range = 2
max_episodes = 1000
max_steps = 200
noise_param = (0, 0.2)
noise_mode = "normal"
solved_reward = -150
solved_repeat = 5
def atanh(x):
return 0.5 * t.log((1 + x) / (1 - x))
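# Descriptive note (added; inferred from forward() below): atanh inverts the tanh
# squashing, so when a stored environment action a = tanh(u) * action_range is passed
# back in for evaluation, atanh(a / action_range) recovers the pre-squash sample u
# whose log probability can then be re-computed.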
# model definition
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, action_range):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.mu_head = nn.Linear(16, action_dim)
self.sigma_head = nn.Linear(16, action_dim)
self.action_range = action_range
def forward(self, state, action=None):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
mu = self.mu_head(a)
sigma = softplus(self.sigma_head(a))
dist = Normal(mu, sigma)
act = (
atanh(action / self.action_range) if action is not None else dist.rsample()
)
act_entropy = dist.entropy()
# the suggested way to confine your actions within a valid range
# is not clamping, but remapping the distribution
act_log_prob = dist.log_prob(act)
act_tanh = t.tanh(act)
act = act_tanh * self.action_range
# the distribution remapping process used in the original paper.
act_log_prob -= t.log(self.action_range * (1 - act_tanh.pow(2)) + 1e-6)
act_log_prob = act_log_prob.sum(1, keepdim=True)
# If your distribution is different from "Normal" then you may either:
# 1. deduce the remapping function for your distribution and a squashing
# function such as tanh
# 2. clamp your actions, but please take care:
# 1. do not clamp actions before calculating their log probability,
# because the log probability of clamped actions might be
# extremely small and will cause nan
# 2. do not clamp actions after sampling and before storing them in
# the replay buffer, because during update the log probabilities will
# be re-evaluated, they might also be extremely small, and the network
# will produce "nan" (might happen in PPO, not in SAC because there is
# no re-evaluation)
# Only clamp actions sent to the environment; this is equivalent to
# changing the action-reward distribution, and will not cause "nan", but
# it makes your training environment differ further from your real
# environment.
return act, act_log_prob, act_entropy
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim + action_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, state, action):
state_action = t.cat([state, action], 1)
q = t.relu(self.fc1(state_action))
q = t.relu(self.fc2(q))
q = self.fc3(q)
return q
if __name__ == "__main__":
actor = Actor(observe_dim, action_dim, action_range)
critic = Critic(observe_dim, action_dim)
critic_t = Critic(observe_dim, action_dim)
critic2 = Critic(observe_dim, action_dim)
critic2_t = Critic(observe_dim, action_dim)
sac = SAC(
actor,
critic,
critic_t,
critic2,
critic2_t,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
)
episode, step, reward_fulfilled = 0, 0, 0
smoothed_total_reward = 0
while episode < max_episodes:
episode += 1
total_reward = 0
terminal = False
step = 0
state = t.tensor(env.reset(), dtype=t.float32).view(1, observe_dim)
while not terminal and step <= max_steps:
step += 1
with t.no_grad():
old_state = state
# agent model inference
action = sac.act({"state": old_state})[0]
state, reward, terminal, _ = env.step(action.numpy())
state = t.tensor(state, dtype=t.float32).view(1, observe_dim)
total_reward += reward[0]
sac.store_transition(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": reward[0],
"terminal": terminal or step == max_steps,
}
)
# update; update more times if the episode is longer, fewer otherwise
if episode > 100:
for _ in range(step):
sac.update()
# show reward
smoothed_total_reward = smoothed_total_reward * 0.9 + total_reward * 0.1
logger.info(f"Episode {episode} total reward={smoothed_total_reward:.2f}")
if smoothed_total_reward > solved_reward:
reward_fulfilled += 1
if reward_fulfilled >= solved_repeat:
logger.info("Environment solved!")
exit(0)
else:
reward_fulfilled = 0
```
#### File: parallel/distributed/_world.py
```python
from threading import Lock, Event
from datetime import timedelta
from typing import Union, List, Any, Callable
from inspect import getframeinfo, stack
from time import sleep
from torch.distributed import rpc
from machin.parallel.exception import ExceptionWithTraceback
from machin.utils.logging import default_logger
from logging import DEBUG
import enum
import torch as t
import torch.distributed as dist
import torch.distributed.distributed_c10d as dist_c10d
WORLD = None # type: Union[None, World]
class LUTType(enum.Enum):
VALUE = 0
SERVICE = 1
def debug_with_process(message):
if default_logger.level != DEBUG:
return
caller = getframeinfo(stack()[1][0])
default_logger.debug(
f"Process [{get_cur_rank()}]: "
f"<{caller.filename},L{caller.lineno}> "
f"{message}"
)
def _copy_doc(from_func):
"""
Used by collective group to copy documents from torch.
"""
import io
import sys
def _decorator(func):
if "sphinx" in sys.modules: # pragma: no cover
src_doc = from_func.__doc__
lines = io.StringIO(src_doc)
# remove the group line
src_doc = "".join([line for line in lines if "group" not in line])
func.__doc__ = src_doc
return func
return _decorator
def _rpc_set_lut_entry(group_name, key, proc_name, lut_type): # pragma: no cover
table = WORLD.value_lut if lut_type == LUTType.VALUE else WORLD.service_lut
with WORLD.lut_lock:
if (group_name, key) in table:
return False
else:
table[(group_name, key)] = proc_name
return True
def _rpc_unset_lut_entry(group_name, key, proc_name, lut_type): # pragma: no cover
table = WORLD.value_lut if lut_type == LUTType.VALUE else WORLD.service_lut
with WORLD.lut_lock:
if (group_name, key) in table:
if table[(group_name, key)] == proc_name:
table.pop((group_name, key))
return True
return False
def _rpc_get_lut_entry(group_name, key, lut_type): # pragma: no cover
table = WORLD.value_lut if lut_type == LUTType.VALUE else WORLD.service_lut
with WORLD.lut_lock:
if (group_name, key) in table:
return True, table[(group_name, key)]
else:
return False, None
def _rpc_has_lut_entry(group_name, key, lut_type): # pragma: no cover
table = WORLD.value_lut if lut_type == LUTType.VALUE else WORLD.service_lut
with WORLD.lut_lock:
if (group_name, key) in table:
return True
else:
return False
def _rpc_call_func(func, args, kwargs): # pragma: no cover
# Call a function/bound method
try:
return func(*args, **kwargs)
except BaseException as e:
raise RpcException(e)
def _rpc_call_service(group_name, key, args, kwargs): # pragma: no cover
# call a registered service
world = get_world()
if group_name not in world.groups:
# could happen if group has been destroyed on this process
# deregister the entry from lut manager
rpc.rpc_sync(
world.lut_manager,
_rpc_unset_lut_entry,
args=(group_name, key, get_cur_name(), LUTType.SERVICE),
)
raise KeyError(f"Group [{group_name}] not found on Process [{get_cur_name()}]")
lut = WORLD.groups[group_name].group_service_lut
if key in lut:
try:
return lut[key](*args, **kwargs)
except BaseException as e:
raise RpcException(e)
else:
# could happen if local map is not synchronized with the
# global map
# deregister the entry from lut manager
rpc.rpc_sync(
world.lut_manager,
_rpc_unset_lut_entry,
args=(group_name, key, get_cur_name(), LUTType.VALUE),
)
raise KeyError(
f"Service [{key}] not found on Group [{group_name}], "
f"Process [{get_cur_name()}]"
)
def _rpc_get_paired_value(group_name, key): # pragma: no cover
# get a paired value
world = get_world()
if group_name not in world.groups:
# could happen if group has been destroyed on this process
# deregister the entry from lut manager
rpc.rpc_sync(
world.lut_manager,
_rpc_unset_lut_entry,
args=(group_name, key, get_cur_name(), LUTType.VALUE),
)
raise KeyError(f"Group [{group_name}] not found on Process [{get_cur_name()}]")
paired_map = WORLD.groups[group_name].group_value_lut
if key in paired_map:
return paired_map[key]
else:
# could happen if local map is not synchronized with the
# global map
# deregister the entry from lut manager
rpc.rpc_sync(
world.lut_manager,
_rpc_unset_lut_entry,
args=(group_name, key, get_cur_name(), LUTType.VALUE),
)
raise KeyError(
f"Value with key [{key}] not found on Group [{group_name}], "
f"Process [{get_cur_name()}]"
)
def _world_singleton(cls):
# Decorator used to make sure that there is only one world instance.
def _world_singleton_wrapper(*args, **kwargs):
global WORLD
if WORLD is None:
WORLD = cls(*args, **kwargs)
else: # pragma: no cover
raise RuntimeError("World could only be initialized once!")
return WORLD
return _world_singleton_wrapper
def _torch_version_less_than(major, minor):
t_ver = [int(v) for v in t.__version__.split(".")[0:2]]
return t_ver < [major, minor]
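# Illustrative example: with torch.__version__ == "1.5.1", t_ver == [1, 5], so
# _torch_version_less_than(1, 6) returns True.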
def get_cur_rank():
"""
Returns:
Current real process rank.
"""
if WORLD is None: # pragma: no cover
raise RuntimeError("Distributed environment not initialized!")
return WORLD.rank
def get_cur_name():
"""
Returns:
Current real process name.
"""
if WORLD is None: # pragma: no cover
raise RuntimeError("Distributed environment not initialized!")
return WORLD.name
def get_world(): # pragma: no cover
return WORLD
def is_world_initialized(): # pragma: no cover
return WORLD is not None
def _is_group_ready(group_name): # pragma: no cover
return WORLD.group_create_signals.get(group_name, None) is False
def _unlock_group(group_name): # pragma: no cover
WORLD.group_create_signals[group_name] = True
def _check_executor(func):
def wrapped(self, *args, **kwargs):
if get_cur_name() not in self.group_members:
raise RuntimeError(
f"You should not execute function {func.__qualname__} when "
"current process is not a member of the group"
)
return func(self, *args, **kwargs)
return wrapped
class RpcException(Exception): # pragma: no cover
"""
Rpc exception class.
"""
def __init__(self, msg):
if isinstance(msg, str):
# used by rpc when reraising the exception on the caller side
super().__init__(msg)
else:
tb = ExceptionWithTraceback(msg).tb
super().__init__(tb)
@_world_singleton
class World:
"""
The distributed world.
"""
def __init__(
self,
name: str,
rank: int = -1,
world_size: int = -1,
init_dist: bool = True,
init_rpc: bool = True,
dist_backend: str = "gloo",
dist_init_method: str = "tcp://localhost:9100",
rpc_init_method: str = "tcp://localhost:9101",
dist_timeout: float = 60,
rpc_timeout: float = 60,
):
"""
Args:
name: A unique name to identify current process.
rank: A unique rank of the current process. You do not need to specify
it if you are using `torch.distributed.launch` or `torchelastic`
world_size: Size of the distributed world. You do not need to specify
it if you are using `torch.distributed.launch` or `torchelastic`
dist_timeout: Distributed package timeout in seconds.
rpc_timeout: Global rpc call timeout in seconds.
"""
self.world_size = world_size
self.rank = rank
self.name = name
self.groups = {}
self.group_create_signals = {}
if init_dist:
dist.init_process_group(
backend=dist_backend,
init_method=dist_init_method,
timeout=timedelta(seconds=dist_timeout),
rank=rank,
world_size=world_size,
)
if init_rpc:
rpc.init_rpc(
self.name,
rank=rank,
world_size=world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=rpc_init_method, rpc_timeout=rpc_timeout
),
)
# get rank-name mapping
self.rank_name_map = {}
for wi in rpc._get_current_rpc_agent().get_worker_infos():
self.rank_name_map[wi.id] = wi.name
# Start role dispatching.
self.started = True
self.rpc_timeout = rpc_timeout
# map for paired values and registered services
self.value_lut = {}
self.service_lut = {}
self.lut_lock = Lock()
self.lut_manager = self.rank_name_map[0]
def stop(self): # pragma: no cover
if not self.started:
raise RuntimeError("Cannot stop the world multiple times!")
else:
rpc.shutdown()
def create_collective_group(
self, ranks: List[int], timeout: float = 60, backend: Any = None,
):
"""
Create a sub process group for collective communications. This function
is blocking and requires all processes in ``ranks`` to
enter this function.
Warning:
Do not make collective communications call in sub-processes,
it is unsafe.
Args:
ranks: Ranks of involved processes.
timeout: Timeout of operations in the new group.
backend: New group backend.
Returns:
A ``Group`` with type ``Group.COLLECTIVE``
"""
ranks = sorted(ranks)
group = CollectiveGroup(
dist.new_group(ranks, timedelta(seconds=timeout), backend),
ranks.index(self.rank) if self.rank in ranks else None,
)
return group
def create_rpc_group(self, group_name: str, members: List[str]):
"""
Create a sub process group for rpc calls. This function
is blocking and requires all processes in ``members`` to
enter this function.
Args:
group_name: A unique group name.
members: Members of the group.
Returns:
A rpc group.
"""
if get_cur_name() not in members: # pragma: no cover
raise RuntimeError(
f"Creator Process [{get_cur_name()}] not in Group [{group_name}]"
)
if group_name in self.groups: # pragma: no cover
raise RuntimeError(f"Group {group_name} already exists!")
group = RpcGroup(group_name, members)
# set the group
self.groups[group_name] = group
# temporarily set a signal
self.group_create_signals[group_name] = False
# wait for other members to enter
if get_cur_name() == members[0]:
while True:
sleep(0.1)
future = [
rpc.rpc_async(m, _is_group_ready, args=(group_name,))
for m in members
]
for fut in future:
if not fut.wait():
break
else:
future = [
rpc.rpc_async(m, _unlock_group, args=(group_name,))
for m in members
]
for fut in future:
fut.wait()
# finish syncing all processes
break
else:
while self.group_create_signals[group_name] is not True:
sleep(0.1)
return group
def get_ranks(self):
"""
Returns:
A list of ranks of all processes.
"""
return list(self.rank_name_map.keys())
def get_members(self):
"""
Returns:
A list of names of all processes.
"""
return list(self.rank_name_map.values())
def __reduce__(self): # pragma: no cover
raise RuntimeError("World is not picklable, create it per process!")
class CollectiveGroup:
"""
A thin wrapper around the collective communication primitives of
``torch.distributed``; the only difference is that ``irecv``
now supports receiving from any src.
"""
def __init__(self, group, current_relative_rank):
"""
Do not construct it yourself, use :meth:`~machin.parallel\
.distributed.World.create_collective_group` instead.
"""
self.group = group
self.current_rank = current_relative_rank
self.destroyed = False
@_copy_doc(dist.send)
def send(self, tensor, dst, tag=0):
return dist.send(tensor, dst, self.group, tag)
@_copy_doc(dist.recv)
def recv(self, tensor, src=None, tag=0):
return dist.recv(tensor, src, self.group, tag)
@_copy_doc(dist.isend)
def isend(self, tensor, dst, tag=0):
return dist.isend(tensor, dst, self.group, tag)
def irecv(self, tensor, src=None, tag=0): # pragma: no cover
"""
Returns:
An object you can call ``.wait()`` on; ``.wait()``
will return the source rank.
"""
# pylint: disable=protected-access
# The original irecv doesn't support receiving from any source,
# but the original recv does. They are essentially
# the same, except that recv has a wait() call
# Note: starting from torch 1.8.0 irecv supports recv from
# any source, but you need to access work._source_rank to
# get source.
dist_c10d._check_single_tensor(tensor, "tensor")
if dist_c10d._rank_not_in_group(self.group):
class Waiter:
def wait(self):
return -1
return Waiter()
if self.group == dist_c10d.GroupMember.WORLD:
dist_c10d._check_default_pg()
pg = dist_c10d._default_pg
else:
pg = self.group
if src is None:
work = pg.recv_anysource([tensor], tag)
if self.group == dist_c10d.GroupMember.WORLD:
class Waiter:
def wait(self):
nonlocal work
work.wait()
if _torch_version_less_than(1, 7):
return work.source_rank()
else:
return work._source_rank()
return Waiter()
else:
class Waiter:
def wait(self):
nonlocal work, pg
work.wait()
if _torch_version_less_than(1, 7):
src_rank = work.source_rank()
else:
src_rank = work._source_rank()
return dist_c10d._get_global_rank(pg, src_rank)
return Waiter()
else:
if self.group == dist_c10d.GroupMember.WORLD:
work = pg.recv([tensor], src, tag)
else:
group_src_rank = dist_c10d._get_group_rank(pg, src)
work = pg.recv([tensor], group_src_rank, tag)
class Waiter:
def wait(self):
nonlocal src
work.wait()
return src
return Waiter()
@_copy_doc(dist.broadcast)
def broadcast(self, tensor, src, async_op=False):
return dist.broadcast(tensor, src, self.group, async_op)
@_copy_doc(dist.all_reduce)
def all_reduce(self, tensor, op=dist.ReduceOp.SUM, async_op=False):
return dist.all_reduce(tensor, op, self.group, async_op)
@_copy_doc(dist.reduce)
def reduce(self, tensor, dst, op=dist.ReduceOp.SUM, async_op=False):
return dist.reduce(tensor, dst, op, self.group, async_op)
@_copy_doc(dist.all_gather)
def all_gather(self, tensor_list, tensor, async_op=False):
return dist.all_gather(tensor_list, tensor, self.group, async_op)
@_copy_doc(dist.gather)
def gather(self, tensor, gather_list, dst=0, async_op=False):
return dist.gather(tensor, gather_list, dst, self.group, async_op)
@_copy_doc(dist.scatter)
def scatter(self, tensor, scatter_list=None, src=0, async_op=False):
return dist.scatter(tensor, scatter_list, src, self.group, async_op)
@_copy_doc(dist.barrier)
def barrier(self, async_op=False):
return dist.barrier(self.group, async_op)
@_copy_doc(dist.broadcast_multigpu)
def broadcast_multigpu(self, tensor_list, src, async_op=False, src_tensor=0):
return dist.broadcast_multigpu(
tensor_list, src, self.group, async_op, src_tensor
)
@_copy_doc(dist.all_reduce_multigpu)
def all_reduce_multigpu(self, tensor_list, op=dist.ReduceOp.SUM, async_op=False):
return dist.all_reduce_multigpu(tensor_list, op, self.group, async_op)
@_copy_doc(dist.reduce_multigpu)
def reduce_multigpu(
self, tensor_list, dst, op=dist.ReduceOp.SUM, async_op=False, dst_tensor=0
): # pragma: no cover
return dist.reduce_multigpu(
tensor_list, dst, op, self.group, async_op, dst_tensor
)
@_copy_doc(dist.all_gather_multigpu)
def all_gather_multigpu(
self, output_tensor_lists, input_tensor_list, async_op=False
): # pragma: no cover
return dist.all_gather_multigpu(
output_tensor_lists, input_tensor_list, self.group, async_op
)
def destroy(self):
"""
Destroy this collective communication group.
"""
if not self.destroyed:
dist.destroy_process_group(self.group)
self.destroyed = True
def size(self):
"""
Returns:
collective group size.
"""
return dist.get_world_size(self.group)
def __reduce__(self): # pragma: no cover
raise RuntimeError("Group is not picklable, create it per process!")
def __del__(self):
self.destroy()
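# A minimal usage sketch (hypothetical; not part of the original module):
# group = get_world().create_collective_group(ranks=[0, 1, 2])
# buf = t.zeros(4)
# src = group.irecv(buf).wait() # blocks until any member sends, returns the source rank
# group.destroy()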
# TODO:
# add the heartbeat mechanism to the lut_manager, to increase robustness
class RpcGroup:
def __init__(self, group_name, group_members, first_create=True):
self.group_name = group_name
self.group_members = group_members
self.group_value_lut = {}
self.group_service_lut = {}
self.destroyed = False
self._barrier_event = Event()
self._barrier_status = False
if first_create and self.is_member(get_cur_name()):
self.register(
f"_rpc_entered_barrier_{get_cur_name()}", self._rpc_entered_barrier,
)
self.register(f"_rpc_exit_barrier_{get_cur_name()}", self._rpc_exit_barrier)
@_copy_doc(rpc.rpc_sync)
def rpc_sync(self, to: str, func: Callable, timeout=-1, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return self._rpc_normal_call(rpc.rpc_sync, to, func, timeout, args, kwargs)
@_copy_doc(rpc.rpc_async)
def rpc_async(self, to: str, func: Callable, timeout=-1, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return self._rpc_normal_call(rpc.rpc_async, to, func, timeout, args, kwargs)
@_copy_doc(rpc.remote)
def remote(self, to: str, func: Callable, timeout=-1, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return self._rpc_normal_call(rpc.remote, to, func, timeout, args, kwargs)
@_check_executor
def pair(self, key: Any, value: Any):
"""
Pair a value to current process group.
Args:
key: A key which uniquely identifies this value in this group.
The name only needs to be unique for this value in this
group.
value: Value to be paired.
Raise:
``KeyError`` if value has already been paired.
"""
if key in self.group_value_lut:
raise KeyError(
f'Value with key "{key}" already paired to Group [{self.group_name}]'
)
# announce the pairing
status = rpc.rpc_sync(
get_world().lut_manager,
_rpc_set_lut_entry,
args=(self.group_name, key, get_cur_name(), LUTType.VALUE),
)
if status:
self.group_value_lut[key] = value
else:
raise KeyError(
f'Value with key "{key}" already paired to Group [{self.group_name}]'
)
@_check_executor
def unpair(self, key: Any):
"""
Unpair a paired value from current process group. The key must be
paired by the current process.
Args:
key: A key which uniquely identifies this value in this group.
The name only needs to be unique for this value in this
group.
Raise:
``KeyError`` if value has not been paired.
"""
if key not in self.group_value_lut:
raise KeyError(
f'Value with key "{key}" not paired to Group [{self.group_name}] '
f"on Process[{get_cur_name()}]"
)
# announce the unpairing
status = rpc.rpc_sync(
get_world().lut_manager,
_rpc_unset_lut_entry,
args=(self.group_name, key, get_cur_name(), LUTType.VALUE),
)
if status:
self.group_value_lut.pop(key)
else: # pragma: no cover
# should never happen
raise RuntimeError(
f'Failed to unpair value with key "{key}" from '
f"Group [{self.group_name}], executor is Process[{get_cur_name()}]"
)
def is_paired(self, key: Any):
"""
Check whether a key has been paired to the current group.
Args:
key: A key which uniquely identifies this value in this group.
The name only needs to be unique for this value in this
group.
"""
return rpc.rpc_sync(
get_world().lut_manager,
_rpc_has_lut_entry,
args=(self.group_name, key, LUTType.VALUE),
)
def get_paired(self, key: Any):
"""
Args:
key: Key of the paired value, in this group.
Returns:
A RRef to the paired value.
Raises:
``KeyError`` if not found.
"""
if key in self.group_value_lut:
holder = get_cur_name()
else:
status, holder = rpc.rpc_sync(
get_world().lut_manager,
_rpc_get_lut_entry,
args=(self.group_name, key, LUTType.VALUE),
)
if not status:
raise KeyError(
f"Value with key [{key}] not found on Group [{self.group_name}], "
)
return rpc.remote(holder, _rpc_get_paired_value, args=(self.group_name, key))
@_check_executor
def register(self, key: Any, service: Any):
"""
Register a service to current process group.
Args:
key: A key which uniquely identifies this service in this group.
The name only needs to be unique for this service in this
group.
service: Service to be registered.
Raise:
``KeyError`` if service has already been registered.
"""
if key in self.group_service_lut:
raise KeyError(
f'Service with key "{key}" already registered '
f"in Group [{self.group_name}]"
)
# announce the pairing
status = rpc.rpc_sync(
get_world().lut_manager,
_rpc_set_lut_entry,
args=(self.group_name, key, get_cur_name(), LUTType.SERVICE),
)
if status:
self.group_service_lut[key] = service
else:
raise KeyError(
f'Service with key "{key}" already registered '
f"in Group [{self.group_name}]"
)
@_check_executor
def deregister(self, key: Any):
"""
Deregister a service from the current process group. The key must have
been registered by the current process.
Args:
key: A key which uniquely identifies this value in this group.
The name only needs to be unique for this value in this
group.
Raise:
``KeyError`` if the service has not been registered.
"""
if key not in self.group_service_lut:
raise KeyError(
f'Service with key "{key}" not registered '
f"in Group [{self.group_name}] "
f"on Process[{get_cur_name()}]"
)
# announce the deregistration
status = rpc.rpc_sync(
get_world().lut_manager,
_rpc_unset_lut_entry,
args=(self.group_name, key, get_cur_name(), LUTType.SERVICE),
)
if status:
self.group_service_lut.pop(key)
else: # pragma: no cover
# should never happen
raise RuntimeError(
f'Failed to deregister service with key "{key}" '
f"from Group [{self.group_name}], "
f"executor is Process[{get_cur_name()}]"
)
def is_registered(self, key: Any):
"""
Check whether a service has been registered in the current group.
Args:
key: A key which uniquely identifies this service in this group.
The name only needs to be unique for this service in this
group.
"""
return rpc.rpc_sync(
get_world().lut_manager,
_rpc_has_lut_entry,
args=(self.group_name, key, LUTType.SERVICE),
)
def registered_sync(self, key: Any, args=(), kwargs=None):
"""
Args:
key: Key of the registered service, in this group.
args: Service arguments.
kwargs: Service keyword arguments.
Returns:
Result returned by the service.
Raises:
``KeyError`` if service is not found.
"""
if kwargs is None:
kwargs = {}
return self._rpc_service_call(rpc.rpc_sync, key, args, kwargs)
def registered_async(self, key: Any, args=(), kwargs=None):
"""
Args:
key: Key of the registered service, in this group.
args: Service arguments.
kwargs: Service keyword arguments.
Returns:
A future object you can call ``wait()`` on.
``wait()`` will block the thread until execution is completed,
and will return the result returned by the service.
Raises:
``KeyError`` if service is not found.
"""
if kwargs is None:
kwargs = {}
return self._rpc_service_call(rpc.rpc_async, key, args, kwargs)
def registered_remote(self, key: Any, args=(), kwargs=None):
"""
Args:
key: Key of the registered service, in this group.
args: Service arguments.
kwargs: Service keyword arguments.
Returns:
A RRef object pointing to the result returned by the service.
Raises:
``KeyError`` if service is not found.
"""
if kwargs is None:
kwargs = {}
return self._rpc_service_call(rpc.remote, key, args, kwargs)
@_check_executor
def barrier(self):
"""
Synchronize all members in the group, until all members have entered
a ``barrier()`` function.
Not thread-safe.
"""
self._barrier_status = True
if get_cur_name() == self.group_members[0]:
while True:
all_entered = all(
self.registered_sync(f"_rpc_entered_barrier_{m}")
for m in self.group_members
)
if not all_entered:
sleep(0.2)
else:
break
for m in self.group_members:
self.registered_sync(f"_rpc_exit_barrier_{m}")
else:
self._barrier_event.wait()
@_check_executor
def destroy(self):
"""
Destroy the rpc group.
Note: deregistration is not performed here, because stale entries will
be purged when any lookup fails.
"""
if not self.destroyed:
WORLD.groups.pop(self.group_name)
self.destroyed = True
def size(self):
"""
Get the number of members in group.
"""
return len(self.group_members)
def is_member(self, target: str = None) -> bool:
"""
Check whether target name is a group member.
"""
if target is None:
target = self.get_cur_name()
return target in self.group_members
def get_group_name(self) -> str:
"""
Returns:
Name of this group.
"""
return self.group_name
def get_group_members(self) -> List[str]:
"""
Returns:
A list of group members.
"""
return self.group_members
@staticmethod
def get_cur_name() -> str:
return get_world().name
def _rpc_normal_call(self, rpc_method, to, func, timeout, args, kwargs):
if not self.is_member(to): # pragma: no cover
raise RuntimeError("RPC target is not a member of group.")
new_args = (func, args, kwargs)
if _torch_version_less_than(1, 6):
return rpc_method(to, _rpc_call_func, args=new_args)
return rpc_method(to, _rpc_call_func, args=new_args, timeout=timeout)
def _rpc_service_call(self, rpc_method, key, args, kwargs):
if key in self.group_service_lut:
holder = get_cur_name()
else:
status, holder = rpc.rpc_sync(
get_world().lut_manager,
_rpc_get_lut_entry,
args=(self.group_name, key, LUTType.SERVICE),
)
if not status:
raise KeyError(
f"Service with key [{key}] not found on Group [{self.group_name}], "
)
return rpc_method(
holder, _rpc_call_service, args=(self.group_name, key, args, kwargs)
)
def _rpc_entered_barrier(self):
return self._barrier_status
def _rpc_exit_barrier(self):
self._barrier_status = False
self._barrier_event.set()
self._barrier_event.clear()
def __reduce__(self): # pragma: no cover
# returns a complete description of group
return RpcGroup, (self.group_name, self.group_members, False)
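# A minimal usage sketch (hypothetical; not part of the original module):
# world = World(name="worker-0", rank=0, world_size=2)
# group = world.create_rpc_group("workers", ["worker-0", "worker-1"])
# group.pair("model_version", 3) # pair a value to this process
# rref = group.get_paired("model_version") # an RRef resolvable from any member
# group.barrier() # wait until all members reach this point
# group.destroy()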
```
#### File: frame/algorithms/test_a2c.py
```python
from machin.model.nets.base import static_module_wrapper as smw
from machin.frame.algorithms.a2c import A2C
from machin.utils.learning_rate import gen_learning_rate_func
from machin.utils.logging import default_logger as logger
from machin.utils.helper_classes import Counter
from machin.utils.conf import Config
from machin.env.utils.openai_gym import disable_view_window
from torch.optim.lr_scheduler import LambdaLR
from torch.distributions import Categorical
import pytest
import torch as t
import torch.nn as nn
import gym
from test.frame.algorithms.utils import unwrap_time_limit, Smooth
from test.util_fixtures import *
from test.util_platforms import linux_only
class Actor(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state, action=None):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
probs = t.softmax(self.fc3(a), dim=1)
dist = Categorical(probs=probs)
act = action if action is not None else dist.sample()
act_entropy = dist.entropy()
act_log_prob = dist.log_prob(act.flatten())
return act, act_log_prob, act_entropy
class Critic(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, state):
v = t.relu(self.fc1(state))
v = t.relu(self.fc2(v))
v = self.fc3(v)
return v
class TestA2C:
# configs and definitions
@pytest.fixture(scope="class")
def train_config(self):
disable_view_window()
c = Config()
# Note: online policy algorithms such as PPO and A2C do not
# work well in Pendulum (reason unknown)
# and MountainCarContinuous (sparse returns)
c.env_name = "CartPole-v0"
c.env = unwrap_time_limit(gym.make(c.env_name))
c.observe_dim = 4
c.action_num = 2
c.max_episodes = 1000
c.max_steps = 200
c.replay_size = 10000
c.solved_reward = 150
c.solved_repeat = 5
return c
@pytest.fixture(scope="function")
def a2c(self, train_config, device, dtype):
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
return a2c
@pytest.fixture(scope="function")
def a2c_vis(self, train_config, device, dtype, tmpdir):
# not used for training, only used for testing apis
c = train_config
tmp_dir = tmpdir.make_numbered_dir()
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
visualize=True,
visualize_dir=str(tmp_dir),
)
return a2c
@pytest.fixture(scope="function")
def a2c_lr(self, train_config, device, dtype):
# not used for training, only used for testing apis
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
lr_func = gen_learning_rate_func([(0, 1e-3), (200000, 3e-4)], logger=logger)
with pytest.raises(TypeError, match="missing .+ positional argument"):
_ = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
lr_scheduler=LambdaLR,
)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
lr_scheduler=LambdaLR,
lr_scheduler_args=((lr_func,), (lr_func,)),
)
return a2c
@pytest.fixture(scope="function")
def a2c_train(self, train_config):
c = train_config
# cpu is faster for testing full training.
actor = smw(Actor(c.observe_dim, c.action_num), "cpu", "cpu")
critic = smw(Critic(c.observe_dim), "cpu", "cpu")
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
return a2c
########################################################################
# Test for A2C acting
########################################################################
def test_act(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
a2c.act({"state": state})
########################################################################
# Test for A2C action evaluation
########################################################################
def test_eval_action(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c._eval_act({"state": state}, {"action": action})
########################################################################
# Test for A2C criticizing
########################################################################
def test__criticize(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
a2c._criticize({"state": state})
########################################################################
# Test for A2C storage
########################################################################
def test_store_step(self, train_config, a2c, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c.store_transition(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"value": 0,
"gae": 0,
"terminal": False,
}
)
@pytest.mark.parametrize("gae_lambda", [0.0, 0.5, 1.0])
def test_store_episode(self, train_config, a2c, dtype, gae_lambda):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
episode = [
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
a2c.gae_lambda = gae_lambda
a2c.store_episode(episode)
########################################################################
# Test for A2C update
########################################################################
def test_update(self, train_config, a2c_vis, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c_vis.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c_vis.update(
update_value=True, update_policy=True, concatenate_samples=True,
)
a2c_vis.entropy_weight = 1e-3
a2c_vis.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c_vis.update(
update_value=False, update_policy=False, concatenate_samples=True,
)
########################################################################
# Test for A2C save & load
########################################################################
# Skipped, it is the same as base
########################################################################
# Test for A2C lr_scheduler
########################################################################
def test_lr_scheduler(self, train_config, a2c_lr, dtype):
a2c_lr.update_lr_scheduler()
########################################################################
# Test for A2C config & init
########################################################################
def test_config_init(self, train_config):
c = train_config
config = A2C.generate_config({})
config["frame_config"]["models"] = ["Actor", "Critic"]
config["frame_config"]["model_kwargs"] = [
{"state_dim": c.observe_dim, "action_num": c.action_num},
{"state_dim": c.observe_dim},
]
a2c = A2C.init_from_config(config)
old_state = state = t.zeros([1, c.observe_dim], dtype=t.float32)
action = t.zeros([1, 1], dtype=t.int)
a2c.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c.update()
########################################################################
# Test for A2C full training.
########################################################################
@linux_only
@pytest.mark.parametrize("gae_lambda", [0.0, 0.5, 1.0])
def test_full_train(self, train_config, a2c_train, gae_lambda):
c = train_config
a2c_train.gae_lambda = gae_lambda
# begin training
episode, step = Counter(), Counter()
reward_fulfilled = Counter()
smoother = Smooth()
terminal = False
env = c.env
while episode < c.max_episodes:
episode.count()
# batch size = 1
total_reward = 0
state = t.tensor(env.reset(), dtype=t.float32)
tmp_observations = []
while not terminal and step <= c.max_steps:
step.count()
with t.no_grad():
old_state = state
# agent model inference
action = a2c_train.act({"state": old_state.unsqueeze(0)})[0]
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).flatten()
total_reward += float(reward)
tmp_observations.append(
{
"state": {"state": old_state.unsqueeze(0)},
"action": {"action": action},
"next_state": {"state": state.unsqueeze(0)},
"reward": float(reward),
"terminal": terminal or step == c.max_steps,
}
)
# update
a2c_train.store_episode(tmp_observations)
a2c_train.update()
smoother.update(total_reward)
step.reset()
terminal = False
logger.info(f"Episode {episode} total reward={smoother.value:.2f}")
if smoother.value > c.solved_reward:
reward_fulfilled.count()
if reward_fulfilled >= c.solved_repeat:
logger.info("Environment solved!")
return
else:
reward_fulfilled.reset()
pytest.fail("A2C Training failed.")
```
#### File: frame/algorithms/test_a3c.py
```python
from machin.model.nets.base import static_module_wrapper as smw
from machin.frame.algorithms.a3c import A3C
from machin.frame.helpers.servers import grad_server_helper
from machin.utils.helper_classes import Counter
from machin.utils.conf import Config
from machin.env.utils.openai_gym import disable_view_window
from torch.distributions import Categorical
import os
import torch as t
import torch.nn as nn
import gym
from test.frame.algorithms.utils import unwrap_time_limit, Smooth
from test.util_run_multi import *
from test.util_fixtures import *
from test.util_platforms import linux_only_forall
linux_only_forall()
class Actor(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state, action=None):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
probs = t.softmax(self.fc3(a), dim=1)
dist = Categorical(probs=probs)
act = action if action is not None else dist.sample()
act_entropy = dist.entropy()
act_log_prob = dist.log_prob(act.flatten())
return act, act_log_prob, act_entropy
class Critic(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, state):
v = t.relu(self.fc1(state))
v = t.relu(self.fc2(v))
v = self.fc3(v)
return v
class TestA3C:
# configs and definitions
disable_view_window()
c = Config()
# Note: online policy algorithms such as PPO and A3C do not
# work well in Pendulum (reason unknown)
# and MountainCarContinuous (sparse returns)
c.env_name = "CartPole-v0"
c.env = unwrap_time_limit(gym.make(c.env_name))
c.observe_dim = 4
c.action_num = 2
c.max_episodes = 3000
c.max_steps = 200
c.replay_size = 10000
c.solved_reward = 150
c.solved_repeat = 5
@staticmethod
def a3c(device, dtype):
c = TestA3C.c
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
# in all test scenarios, all processes will be used as reducers
servers = grad_server_helper(
[lambda: Actor(c.observe_dim, c.action_num), lambda: Critic(c.observe_dim)],
learning_rate=5e-3,
)
a3c = A3C(
actor,
critic,
nn.MSELoss(reduction="sum"),
servers,
replay_device="cpu",
replay_size=c.replay_size,
)
return a3c
########################################################################
# Test for A3C acting
########################################################################
@staticmethod
@run_multi(
expected_results=[True, True, True],
pass_through=["device", "dtype"],
timeout=180,
)
@WorldTestBase.setup_world
def test_act(_, device, dtype):
c = TestA3C.c
a3c = TestA3C.a3c(device, dtype)
state = t.zeros([1, c.observe_dim], dtype=dtype)
a3c.act({"state": state})
return True
########################################################################
# Test for A3C action evaluation
########################################################################
@staticmethod
@run_multi(
expected_results=[True, True, True],
pass_through=["device", "dtype"],
timeout=180,
)
@WorldTestBase.setup_world
def test_eval_action(_, device, dtype):
c = TestA3C.c
a3c = TestA3C.a3c(device, dtype)
state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a3c._eval_act({"state": state}, {"action": action})
return True
########################################################################
# Test for A3C criticizing
########################################################################
@staticmethod
@run_multi(
expected_results=[True, True, True],
pass_through=["device", "dtype"],
timeout=180,
)
@WorldTestBase.setup_world
def test__criticize(_, device, dtype):
c = TestA3C.c
a3c = TestA3C.a3c(device, dtype)
state = t.zeros([1, c.observe_dim], dtype=dtype)
a3c._criticize({"state": state})
return True
########################################################################
# Test for A3C storage
########################################################################
# Skipped, it is the same as A2C
########################################################################
# Test for A3C update
########################################################################
@staticmethod
@run_multi(
expected_results=[True, True, True],
pass_through=["device", "dtype"],
timeout=180,
)
@WorldTestBase.setup_world
def test_update(rank, device, dtype):
c = TestA3C.c
c.device = gpu
a3c = TestA3C.a3c(device, dtype)
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
begin = time()
while time() - begin < 5:
a3c.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a3c.update(
update_value=True,
update_policy=True,
update_target=True,
concatenate_samples=True,
)
sleep(0.01)
if rank == 1:
# pull the newest model
a3c.manual_sync()
return True
########################################################################
# Test for A3C save & load
########################################################################
# Skipped, it is the same as A2C
########################################################################
# Test for A3C lr_scheduler
########################################################################
# Skipped, it is the same as A2C
########################################################################
# Test for A3C config & init
########################################################################
@staticmethod
@run_multi(expected_results=[True, True, True], timeout=180)
@WorldTestBase.setup_world
def test_config_init(rank):
c = TestA3C.c
config = A3C.generate_config({})
config["frame_config"]["models"] = ["Actor", "Critic"]
config["frame_config"]["model_kwargs"] = [
{"state_dim": c.observe_dim, "action_num": c.action_num},
{"state_dim": c.observe_dim},
]
a3c = A3C.init_from_config(config)
old_state = state = t.zeros([1, c.observe_dim], dtype=t.float32)
action = t.zeros([1, 1], dtype=t.int)
begin = time()
while time() - begin < 5:
a3c.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a3c.update()
sleep(0.01)
if rank == 1:
# pull the newest model
a3c.manual_sync()
return True
########################################################################
# Test for A3C full training.
########################################################################
@staticmethod
@pytest.mark.parametrize("gae_lambda", [0.0, 0.5, 1.0])
@run_multi(
expected_results=[True, True, True], pass_through=["gae_lambda"], timeout=1800
)
@WorldTestBase.setup_world
def test_full_train(rank, gae_lambda):
c = TestA3C.c
a3c = TestA3C.a3c("cpu", t.float32)
a3c.set_sync(False)
# begin training
episode, step = Counter(), Counter()
reward_fulfilled = Counter()
smoother = Smooth()
terminal = False
env = c.env
# for cpu usage viewing
default_logger.info(f"{rank}, pid {os.getpid()}")
while episode < c.max_episodes:
episode.count()
# batch size = 1
total_reward = 0
state = t.tensor(env.reset(), dtype=t.float32)
a3c.manual_sync()
tmp_observations = []
while not terminal and step <= c.max_steps:
step.count()
with t.no_grad():
old_state = state
# agent model inference
action = a3c.act({"state": old_state.unsqueeze(0)})[0]
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).flatten()
total_reward += float(reward)
tmp_observations.append(
{
"state": {"state": old_state.unsqueeze(0)},
"action": {"action": action},
"next_state": {"state": state.unsqueeze(0)},
"reward": float(reward),
"terminal": terminal or step == c.max_steps,
}
)
# update
a3c.store_episode(tmp_observations)
a3c.update()
smoother.update(total_reward)
step.reset()
terminal = False
default_logger.info(
f"Process {rank} Episode {episode} total reward={smoother.value:.2f}"
)
if smoother.value > c.solved_reward:
reward_fulfilled.count()
if reward_fulfilled >= c.solved_repeat:
default_logger.info("Environment solved!")
return True
else:
reward_fulfilled.reset()
raise RuntimeError("A3C Training failed.")
```
#### File: frame/algorithms/test_gail.py
```python
from machin.model.nets.base import static_module_wrapper as smw
from machin.frame.algorithms.ppo import PPO
from machin.frame.algorithms.gail import GAIL
from machin.utils.learning_rate import gen_learning_rate_func
from machin.utils.logging import default_logger as logger
from machin.utils.helper_classes import Counter
from machin.utils.conf import Config
from machin.env.utils.openai_gym import disable_view_window
from torch.optim.lr_scheduler import LambdaLR
from torch.distributions import Categorical
import os
import pytest
import torch as t
import torch.nn as nn
import gym
from test.frame.algorithms.utils import unwrap_time_limit, Smooth
from test.util_fixtures import *
from test.util_platforms import linux_only
class Actor(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state, action=None):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
probs = t.softmax(self.fc3(a), dim=1)
dist = Categorical(probs=probs)
act = action if action is not None else dist.sample()
act_entropy = dist.entropy()
act_log_prob = dist.log_prob(act.flatten())
return act, act_log_prob, act_entropy
class Critic(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 32)
self.fc2 = nn.Linear(32, 32)
self.fc3 = nn.Linear(32, 1)
def forward(self, state):
v = t.relu(self.fc1(state))
v = t.relu(self.fc2(v))
v = self.fc3(v)
return v
class Discriminator(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim + 1, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
self.action_num = action_num
def forward(self, state, action: t.Tensor):
d = t.relu(
self.fc1(
t.cat(
[state, action.type_as(state).view(-1, 1) / self.action_num], dim=1
)
)
)
d = t.relu(self.fc2(d))
d = t.sigmoid(self.fc3(d))
return d
class TestGAIL:
# configs and definitions
@pytest.fixture(scope="class")
def train_config(self):
disable_view_window()
c = Config()
# Note: online policy algorithms such as PPO and A2C do not
# work well in Pendulum (reason unknown)
# and MountainCarContinuous (sparse returns)
c.env_name = "CartPole-v0"
c.env = unwrap_time_limit(gym.make(c.env_name))
c.observe_dim = 4
c.action_num = 2
c.max_episodes = 1000
c.max_steps = 200
c.replay_size = 10000
c.solved_reward = 150
c.solved_repeat = 5
return c
@pytest.fixture(scope="function")
def gail(self, train_config, device, dtype):
# not used for training, only used for testing apis
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
discriminator = smw(
Discriminator(c.observe_dim, c.action_num).type(dtype).to(device),
device,
device,
)
ppo = PPO(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
gail = GAIL(
discriminator,
ppo,
t.optim.Adam,
expert_replay_device="cpu",
expert_replay_size=c.replay_size,
)
return gail
@pytest.fixture(scope="function")
def gail_vis(self, train_config, device, dtype, tmpdir):
# not used for training, only used for testing apis
c = train_config
tmp_dir = tmpdir.make_numbered_dir()
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
discriminator = smw(
Discriminator(c.observe_dim, c.action_num).type(dtype).to(device),
device,
device,
)
ppo = PPO(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
visualize=True,
visualize_dir=str(tmp_dir),
)
gail = GAIL(
discriminator,
ppo,
t.optim.Adam,
expert_replay_device="cpu",
expert_replay_size=c.replay_size,
visualize=True,
visualize_dir=str(tmp_dir),
)
return gail
@pytest.fixture(scope="function")
def gail_lr(self, train_config, device, dtype):
# not used for training, only used for testing apis
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
discriminator = smw(
Discriminator(c.observe_dim, c.action_num).type(dtype).to(device),
device,
device,
)
ppo = PPO(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
lr_func = gen_learning_rate_func([(0, 1e-3), (200000, 3e-4)], logger=logger)
with pytest.raises(TypeError, match="missing .+ positional argument"):
_ = GAIL(
discriminator,
ppo,
t.optim.Adam,
expert_replay_device="cpu",
expert_replay_size=c.replay_size,
lr_scheduler=LambdaLR,
)
gail = GAIL(
discriminator,
ppo,
t.optim.Adam,
expert_replay_device="cpu",
expert_replay_size=c.replay_size,
lr_scheduler=LambdaLR,
lr_scheduler_args=((lr_func,),),
)
return gail
@pytest.fixture(scope="function")
def gail_train(self, train_config):
c = train_config
actor = smw(Actor(c.observe_dim, c.action_num), "cpu", "cpu")
critic = smw(Critic(c.observe_dim), "cpu", "cpu")
discriminator = smw(Discriminator(c.observe_dim, c.action_num), "cpu", "cpu")
ppo = PPO(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
gail = GAIL(
discriminator,
ppo,
t.optim.Adam,
expert_replay_device="cpu",
expert_replay_size=c.replay_size,
)
return gail
########################################################################
# Test for GAIL acting
########################################################################
def test_act(self, train_config, gail, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
gail.act({"state": state})
########################################################################
# Test for GAIL discriminating
########################################################################
def test__discriminate(self, train_config, gail, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
gail._discriminate({"state": state}, {"action": action})
########################################################################
# Test for GAIL storage
########################################################################
def test_store_episode(self, train_config, gail, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
episode = [
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
gail.store_episode(episode)
def test_store_expert_episode(self, train_config, gail, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
episode = [
{"state": {"state": old_state}, "action": {"action": action},}
for _ in range(3)
]
gail.store_expert_episode(episode)
########################################################################
# Test for GAIL update
########################################################################
def test_update(self, train_config, gail_vis, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
expert_episode = [
{"state": {"state": old_state}, "action": {"action": action}}
for _ in range(3)
]
episode = [
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
gail_vis.store_episode(episode)
gail_vis.store_expert_episode(expert_episode)
gail_vis.update(
update_value=True,
update_policy=True,
update_discriminator=True,
concatenate_samples=True,
)
gail_vis.store_episode(episode)
gail_vis.update(
update_value=False,
update_policy=False,
update_discriminator=False,
concatenate_samples=True,
)
########################################################################
# Test for GAIL save & load
########################################################################
# Skipped, it is the same as base
########################################################################
# Test for GAIL lr_scheduler
########################################################################
def test_lr_scheduler(self, train_config, gail_lr, dtype):
gail_lr.update_lr_scheduler()
########################################################################
# Test for GAIL config & init
########################################################################
def test_config_init(self, train_config, tmpdir, archives):
dir = tmpdir.make_numbered_dir()
t.save(
archives["gail"].load().item("expert_trajectories"),
os.path.join(dir, "trajectory.data"),
)
c = train_config
config = GAIL.generate_config({})
config["frame_config"]["PPO_config"]["frame_config"]["models"] = [
"Actor",
"Critic",
]
config["frame_config"]["PPO_config"]["frame_config"]["model_kwargs"] = [
{"state_dim": c.observe_dim, "action_num": c.action_num},
{"state_dim": c.observe_dim},
]
config["frame_config"]["models"] = ["Discriminator"]
config["frame_config"]["model_kwargs"] = [
{"state_dim": c.observe_dim, "action_num": c.action_num},
]
config["frame_config"]["expert_trajectory_path"] = os.path.join(
dir, "trajectory.data"
)
gail = GAIL.init_from_config(config)
old_state = state = t.zeros([1, c.observe_dim], dtype=t.float32)
action = t.zeros([1, 1], dtype=t.float32)
expert_episode = [
{"state": {"state": old_state}, "action": {"action": action}}
for _ in range(3)
]
episode = [
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
gail.store_episode(episode)
gail.store_expert_episode(expert_episode)
gail.update()
########################################################################
# Test for GAIL full training.
########################################################################
@linux_only
def test_full_train(self, train_config, gail_train, archives):
c = train_config
for expert_episode in archives["gail"].load().item("expert_trajectories"):
gail_train.store_expert_episode(expert_episode)
# begin training
episode, step = Counter(), Counter()
reward_fulfilled = Counter()
smoother = Smooth()
terminal = False
env = c.env
while episode < c.max_episodes:
episode.count()
# batch size = 1
total_reward = 0
state = t.tensor(env.reset(), dtype=t.float32)
tmp_observations = []
while not terminal and step <= c.max_steps:
step.count()
with t.no_grad():
old_state = state
# agent model inference
action = gail_train.act({"state": old_state.unsqueeze(0)})[0]
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).flatten()
total_reward += float(reward)
tmp_observations.append(
{
"state": {"state": old_state.unsqueeze(0)},
"action": {"action": action},
"next_state": {"state": state.unsqueeze(0)},
"reward": float(reward),
"terminal": terminal or step == c.max_steps,
}
)
# update
gail_train.store_episode(tmp_observations)
gail_train.update()
smoother.update(total_reward)
step.reset()
terminal = False
logger.info(f"Episode {episode} total reward={smoother.value:.2f}")
if smoother.value > c.solved_reward:
reward_fulfilled.count()
if reward_fulfilled >= c.solved_repeat:
logger.info("Environment solved!")
return
else:
reward_fulfilled.reset()
pytest.fail("GAIL Training failed.")
``` |
{
"source": "1adrianb/binary-networks-pytorch",
"score": 2
} |
#### File: binary-networks-pytorch/bnn/binarize.py
```python
from dataclasses import asdict
from .bconfig import BConfig
import re
import copy
import logging
import torch
import torch.nn as nn
from typing import Dict, List, Set
from . import layers as bnn
DEFAULT_MODULE_MAPPING = {
nn.Linear: bnn.Linear,
nn.Conv2d: bnn.Conv2d,
nn.Conv1d: bnn.Conv1d
}
for k, v in copy.copy(DEFAULT_MODULE_MAPPING).items():
DEFAULT_MODULE_MAPPING[v] = v
def _get_first_layer(model: nn.Module) -> List[str]:
for name, module in model.named_modules():
if type(module) in DEFAULT_MODULE_MAPPING:
return [name]
return []
def _get_last_layer(model: nn.Module) -> List[str]:
for name, module in reversed(list(model.named_modules())):
if type(module) in DEFAULT_MODULE_MAPPING:
return [name]
return []
def _regex_match(model: nn.Module, pattern: str, modules_mapping: Dict[nn.Module, nn.Module]) -> List[str]:
# Strip first and last character (expected to be $ and $)
pattern = pattern[1:-1]
matched_names = []
pattern = re.compile(pattern)
for name, module in model.named_modules():
if type(module) in modules_mapping:
if pattern.search(name) is not None:
matched_names.append(name)
return matched_names
_KNOWN_SPECIAL_WORDS = {
    '_first_': _get_first_layer,
    '_last_': _get_last_layer
}
def get_unique_devices_(module: nn.Module) -> Set[torch.device]:
return {p.device for p in module.parameters()} | \
{p.device for p in module.buffers()}
def get_modules_to_binarize(model: nn.Module, bconfig: BConfig,
modules_mapping: Dict[nn.Module, nn.Module] = None,
custom_config_layers_name: Dict[str, BConfig] = {},
ignore_layers_name: List[str] = []) -> Dict[str, nn.Module]:
if modules_mapping is None:
modules_mapping = DEFAULT_MODULE_MAPPING
# Parse special cases
processed_ignore_layer_names = []
for name in ignore_layers_name:
if name in _KNOWN_SPECIAL_WORDS.keys():
processed_ignore_layer_names += _KNOWN_SPECIAL_WORDS[name](model)
elif name[0] == '$' and name[-1] == '$':
processed_ignore_layer_names += _regex_match(model, name, modules_mapping)
else:
processed_ignore_layer_names.append(name)
modules_to_replace = {}
for name, module in model.named_modules():
if type(module) in modules_mapping:
if name in processed_ignore_layer_names:
continue
layer_config = copy.copy(bconfig)
# Use layer specific customization
if name in custom_config_layers_name:
for k, v in asdict(custom_config_layers_name[name]).items():
setattr(layer_config, k, v)
# respect device affinity when swapping modules
devices = get_unique_devices_(module)
assert len(devices) <= 1, (
"swap_module only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
modules_to_replace[name] = modules_mapping[type(module)].from_module(
module,
layer_config
)
if device:
modules_to_replace[name].to(device)
elif name in custom_config_layers_name:
logging.warning('Module named {} defined in the configuration was not found.'.format(name))
return modules_to_replace
def swap_modules_by_name(model: nn.Module, modules_to_replace: Dict[str, nn.Module],
modules_mapping: Dict[nn.Module, nn.Module] = None) -> nn.Module:
if modules_mapping is None:
modules_mapping = DEFAULT_MODULE_MAPPING
def _swap_module(module: nn.Module):
for name, child in module.named_children():
if type(child) in modules_mapping:
for n, m in model.named_modules():
if child is m:
if n in modules_to_replace:
setattr(module, name, modules_to_replace.pop(n))
break
else:
_swap_module(child)
if len(list(model.named_children())) == 0:
if type(model) in modules_mapping and len(modules_to_replace.keys()) == 1:
model = modules_to_replace[list(modules_to_replace.keys())[0]]
else:
_swap_module(model)
return model
def prepare_binary_model(model: nn.Module, bconfig: BConfig,
modules_mapping: Dict[nn.Module, nn.Module] = None,
custom_config_layers_name: Dict[str, BConfig] = {},
ignore_layers_name: List[str] = []) -> nn.Module:
modules_to_replace = get_modules_to_binarize(
model,
bconfig,
modules_mapping,
custom_config_layers_name,
ignore_layers_name)
model = swap_modules_by_name(model, modules_to_replace, modules_mapping)
return model
```
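The functions above are normally driven through `prepare_binary_model`. Below is a minimal, hedged usage sketch: the `BConfig` keyword names follow the attribute accesses in `bnn/layers/conv.py`, the binarizers come from `bnn/ops.py` shown further below, and the ResNet model is only an illustration, not a prescribed target.
```python
# Hedged usage sketch for prepare_binary_model: binarize every supported layer
# of a torchvision ResNet while keeping the first and last layers in full
# precision via the special words resolved in get_modules_to_binarize.
import torchvision.models as models
from bnn import BConfig, prepare_binary_model
from bnn.ops import BasicInputBinarizer, BasicScaleBinarizer, XNORWeightBinarizer

model = models.resnet18(pretrained=False)
bconfig = BConfig(
    activation_pre_process=BasicInputBinarizer,
    activation_post_process=BasicScaleBinarizer,
    weight_pre_process=XNORWeightBinarizer.with_args(center_weights=True),
)
bmodel = prepare_binary_model(model, bconfig, ignore_layers_name=['_first_', '_last_'])
```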
#### File: binary-networks-pytorch/bnn/engine.py
```python
import yaml
import re
from easydict import EasyDict as edict
from typing import List, Callable, Dict
from . import BConfig, prepare_binary_model, Identity
from .ops import *
def _option_builder_helper(partial_config: Dict[str, str]) -> str:
if 'args' in partial_config:
start_string = '.with_args('
end_string = ')'
content = ''
for k, v in partial_config.args.items():
content += '{}={},'.format(k, v)
final_string = start_string + content + end_string
return final_string
return ''
class BinaryChef(object):
r"""Converts a given model according to the configutation and steps defined in an YAML file.ut
Examples::
>>> bc = BinaryChef('config.yaml')
>>> for i in range(len(bc)):
>>> model = bc.next(model)
>>> # do training logic for desired number of epochs
Args:
config: path to a valid yaml file containing the steps
user_modules: list containing custom user defined binarizers
"""
def __init__(self, config: str, user_modules: List[Callable[..., nn.Module]] = []) -> None:
with open(config) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
config = [edict(config[k]) for k in config.keys()]
self.config = config
self.current_step = 0
# inject the received functions into the namespace
for user_module in user_modules:
globals()[user_module.__name__] = user_module
def __len__(self) -> int:
return len(self.config)
def get_num_steps(self) -> int:
return len(self)
def run_step(self, model: nn.Module, step: int) -> nn.Module:
assert len(self) > step
step_config = self.config[step]
# Ignore certain white listed layers
print(step_config, type(step_config))
ignore_layer_names = step_config.ignore_layer_names if 'ignore_layer_names' in step_config else []
# prepare model
bconfig = BConfig(
activation_pre_process=eval(
step_config.pre_activation.name
+ _option_builder_helper(
step_config.pre_activation)),
activation_post_process=eval(
step_config.post_activation.name
+ _option_builder_helper(
step_config.post_activation)),
weight_pre_process=eval(
step_config.weight.name
+ _option_builder_helper(
step_config.weight)))
bmodel = prepare_binary_model(model, bconfig=bconfig, ignore_layers_name=ignore_layer_names)
return bmodel
def next(self, model: nn.Module) -> nn.Module:
self.current_step += 1
return self.run_step(model, self.current_step - 1)
```
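`run_step` expects each YAML step to provide `pre_activation`, `post_activation` and `weight` entries (each with a `name` and optional `args`) plus an optional `ignore_layer_names` list. The following recipe is a hedged illustration of that shape only; the binarizer names and step ordering are examples, not a recommended schedule.
```python
# Hedged sketch of a two-step recipe in the shape BinaryChef.run_step reads.
import yaml

EXAMPLE_RECIPE = """
step1:
  pre_activation:
    name: BasicInputBinarizer
  post_activation:
    name: BasicScaleBinarizer
  weight:
    name: XNORWeightBinarizer
    args:
      center_weights: True
  ignore_layer_names: ['_first_', '_last_']
step2:
  pre_activation:
    name: StochasticInputBinarizer
  post_activation:
    name: BasicScaleBinarizer
  weight:
    name: XNORWeightBinarizer
"""

steps = yaml.load(EXAMPLE_RECIPE, Loader=yaml.FullLoader)
print(list(steps.keys()))  # ['step1', 'step2']
```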
#### File: bnn/layers/conv.py
```python
from typing import Union
import torch
import torch.nn as nn
from torch.nn.common_types import _size_1_t, _size_2_t
from .. import BConfig
from .helpers import copy_paramters
class Conv1d(nn.Conv1d):
_FLOAT_MODULE = nn.Conv1d
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: Union[str, _size_1_t] = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
bconfig: BConfig = None
) -> None:
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode)
assert bconfig, 'bconfig is required for a binarized module'
self.bconfig = bconfig
self.activation_pre_process = bconfig.activation_pre_process()
self.activation_post_process = bconfig.activation_post_process(self)
self.weight_pre_process = bconfig.weight_pre_process()
def forward(self, input: torch.Tensor) -> torch.Tensor:
input_proc = self.activation_pre_process(input)
input_proc = self._conv_forward(input_proc, self.weight_pre_process(self.weight), bias=self.bias)
return self.activation_post_process(
input_proc,
input
)
@classmethod
def from_module(cls, mod: nn.Module, bconfig: BConfig = None, update: bool = False):
assert type(mod) == cls._FLOAT_MODULE or type(mod) == cls, 'bnn.' + cls.__name__ + \
'.from_float only works for ' + cls._FLOAT_MODULE.__name__
if not bconfig:
            assert hasattr(mod, 'bconfig'), 'The input module requires a predefined bconfig'
            assert mod.bconfig, 'The input module bconfig is invalid'
bconfig = mod.bconfig
bnn_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
groups=mod.groups, bias=mod.bias is not None,
padding_mode=mod.padding_mode, bconfig=bconfig)
bnn_conv.weight = mod.weight
bnn_conv.bias = mod.bias
if update:
copy_paramters(mod, bnn_conv, bconfig)
return bnn_conv
class Conv2d(nn.Conv2d):
_FLOAT_MODULE = nn.Conv2d
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
bconfig: BConfig = None
) -> None:
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode)
assert bconfig, 'bconfig is required for a binarized module'
self.bconfig = bconfig
self.activation_pre_process = bconfig.activation_pre_process()
self.activation_post_process = bconfig.activation_post_process(self)
self.weight_pre_process = bconfig.weight_pre_process()
def forward(self, input: torch.Tensor) -> torch.Tensor:
input_proc = self.activation_pre_process(input)
input_proc = self._conv_forward(input_proc, self.weight_pre_process(self.weight), bias=self.bias)
return self.activation_post_process(
input_proc,
input
)
@classmethod
def from_module(cls, mod: nn.Module, bconfig: BConfig = None, update: bool = False):
assert type(mod) == cls._FLOAT_MODULE or type(mod) == cls, 'bnn.' + cls.__name__ + \
'.from_float only works for ' + cls._FLOAT_MODULE.__name__
if not bconfig:
            assert hasattr(mod, 'bconfig'), 'The input module requires a predefined bconfig'
            assert mod.bconfig, 'The input module bconfig is invalid'
bconfig = mod.bconfig
bnn_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
groups=mod.groups, bias=mod.bias is not None,
padding_mode=mod.padding_mode, bconfig=bconfig)
bnn_conv.weight = mod.weight
bnn_conv.bias = mod.bias
if update:
copy_paramters(mod, bnn_conv, bconfig)
return bnn_conv
```
#### File: binary-networks-pytorch/bnn/ops.py
```python
from functools import partial
from abc import ABCMeta, abstractmethod
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, List
def _with_args(cls_or_self: Any, **kwargs: Dict[str, Any]) -> Any:
r"""Wrapper that allows creation of class factories.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances.
Source: https://github.com/pytorch/pytorch/blob/b02c932fb67717cb26d6258908541b670faa4e72/torch/quantization/observer.py
Example::
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
>>> foo_instance1 = foo_builder()
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1) == id(foo_instance2)
False
"""
class _PartialWrapper(object):
def __init__(self, p):
self.p = p
def __call__(self, *args, **keywords):
return self.p(*args, **keywords)
def __repr__(self):
return self.p.__repr__()
with_args = _with_args
r = _PartialWrapper(partial(cls_or_self, **kwargs))
return r
ABC = ABCMeta(str("ABC"), (object,), {})
class BinarizerBase(ABC, nn.Module):
def __init__(self) -> None:
super(BinarizerBase, self).__init__()
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
with_args = classmethod(_with_args)
class SignActivation(torch.autograd.Function):
r"""Applies the sign function element-wise
    :math:`\text{sgn}(x) = \begin{cases} -1 & \text{if } x < 0, \\ 1 & \text{if } x > 0 \end{cases}`
the gradients of which are computed using a STE, namely using :math:`\text{hardtanh(x)}`.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> input = torch.randn(3)
>>> output = SignActivation.apply(input)
"""
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
ctx.save_for_backward(input)
return input.sign()
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
input, = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input.masked_fill_(input.ge(1) | input.le(-1), 0)
return grad_input
class SignActivationStochastic(SignActivation):
r"""Binarize the data using a stochastic binarizer
    :math:`\text{sgn}(x) = \begin{cases} -1 & \text{with probability } p = \sigma(x), \\ 1 & \text{with probability } 1 - p \end{cases}`
the gradients of which are computed using a STE, namely using :math:`\text{hardtanh(x)}`.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> input = torch.randn(3)
>>> output = SignActivationStochastic.apply(input)
"""
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
ctx.save_for_backward(input)
noise = torch.rand_like(input).sub_(0.5)
return input.add_(1).div_(2).add_(noise).clamp_(0, 1).round_().mul_(2).sub_(1)
class XNORWeightBinarizer(BinarizerBase):
r"""Binarize the parameters of a given layer using the analytical solution
proposed in the XNOR-Net paper.
    :math:`\text{out} = \frac{1}{n}\|\mathbf{W}\|_{\ell_1} \, \text{sgn}(\mathbf{W})`
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> binarizer = XNORWeightBinarizer()
>>> output = F.conv2d(input, binarizer(weight))
Args:
compute_alpha: if True, compute the real-valued scaling factor
center_weights: make the weights zero-mean
"""
def __init__(self, compute_alpha: bool = True, center_weights: bool = False) -> None:
super(XNORWeightBinarizer, self).__init__()
self.compute_alpha = compute_alpha
self.center_weights = center_weights
def _compute_alpha(self, x: torch.Tensor) -> torch.Tensor:
n = x[0].nelement()
if x.dim() == 4:
alpha = x.norm(1, 3, keepdim=True).sum([2, 1], keepdim=True).div_(n)
elif x.dim() == 3:
alpha = x.norm(1, 2, keepdim=True).sum([1], keepdim=True).div_(n)
elif x.dim() == 2:
alpha = x.norm(1, 1, keepdim=True).div_(n)
else:
raise ValueError(f"Expected ndims equal with 2 or 4, but found {x.dim()}")
return alpha
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.center_weights:
mean = x.mean(1, keepdim=True).expand_as(x)
x = x.sub(mean)
if self.compute_alpha:
alpha = self._compute_alpha(x)
x = SignActivation.apply(x).mul_(alpha.expand_as(x))
else:
x = SignActivation.apply(x)
return x
class BasicInputBinarizer(BinarizerBase):
r"""Applies the sign function element-wise.
nn.Module version of SignActivation.
"""
def __init__(self):
super(BasicInputBinarizer, self).__init__()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
return SignActivation.apply(x)
class StochasticInputBinarizer(BinarizerBase):
r"""Applies the sign function element-wise.
nn.Module version of SignActivation.
"""
def __init__(self):
super(StochasticInputBinarizer, self).__init__()
def forward(self, x: torch.Tensor):
return SignActivationStochastic.apply(x)
class AdvancedInputBinarizer(BinarizerBase):
def __init__(self, derivative_funct=torch.tanh, t: int = 5):
super(AdvancedInputBinarizer, self).__init__()
self.derivative_funct = derivative_funct
self.t = t
    def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.derivative_funct(x * self.t)
with torch.no_grad():
x = torch.sign(x)
return x
class BasicScaleBinarizer(BinarizerBase):
def __init__(self, module: nn.Module, shape: List[int] = None) -> None:
super(BasicScaleBinarizer, self).__init__()
if isinstance(module, nn.Linear):
num_channels = module.out_features
elif isinstance(module, nn.Conv2d):
num_channels = module.out_channels
else:
if hasattr(module, 'out_channels'):
num_channels = module.out_channels
else:
raise Exception('Unknown layer of type {} missing out_channels'.format(type(module)))
if shape is None:
alpha_shape = [1, num_channels] + [1] * (module.weight.dim() - 2)
else:
alpha_shape = shape
self.alpha = nn.Parameter(torch.ones(*alpha_shape))
def forward(self, layer_out: torch.Tensor, layer_in: torch.Tensor) -> torch.Tensor:
x = layer_out
return x.mul_(self.alpha)
def extra_repr(self) -> str:
return '{}'.format(list(self.alpha.size()))
class XNORScaleBinarizer(BinarizerBase):
def __init__(self, module: nn.Module) -> None:
        super(XNORScaleBinarizer, self).__init__()
kernel_size = module.kernel_size
self.stride = module.stride
self.padding = module.padding
        self.register_buffer('fixed_weight', torch.ones(1, 1, *kernel_size).div_(math.prod(kernel_size)), persistent=False)
def forward(self, layer_out: torch.Tensor, layer_in: torch.Tensor) -> torch.Tensor:
x = layer_out
        scale = torch.mean(layer_in, dim=1, keepdim=True)
scale = F.conv2d(scale, self.fixed_weight, stride=self.stride, padding=self.padding)
return x.mul_(scale)
``` |
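A quick, hedged sanity check of the straight-through estimator defined by `SignActivation` above: the forward pass returns the sign, and the backward pass zeroes gradients wherever the input magnitude is at least 1.
```python
# Hedged check of SignActivation's straight-through gradient.
import torch
from bnn.ops import SignActivation

x = torch.tensor([-2.0, -0.5, 0.3, 1.5], requires_grad=True)
y = SignActivation.apply(x)
y.sum().backward()
print(y)       # tensor([-1., -1., 1., 1.])
print(x.grad)  # tensor([0., 1., 1., 0.]) -- gradient clipped where |x| >= 1
```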
{
"source": "1adrianb/video-transformers",
"score": 2
} |
#### File: slowfast/datasets/transform.py
```python
import math
import random
import numpy as np
import torch
from PIL import Image
def _fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = list()
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
        ret.append((3 * w_step, 3 * h_step))  # lower right quarter
return ret
def _sample_fix_offset(image_w, image_h, crop_w, crop_h, more_fix_crop):
offsets = _fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h)
return random.choice(offsets)
def _sample_crop_size(
im_size, scales, input_size, max_distort, fix_crop, more_fix_crop
):
image_w, image_h = im_size[0], im_size[1]
# find a crop size
base_size = min(image_w, image_h)
crop_sizes = [int(base_size * x) for x in scales]
crop_h = [
input_size[1] if abs(x - input_size[1]) < 3 else x for x in crop_sizes
]
crop_w = [
input_size[0] if abs(x - input_size[0]) < 3 else x for x in crop_sizes
]
pairs = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= max_distort:
pairs.append((w, h))
crop_pair = random.choice(pairs)
if not fix_crop:
w_offset = random.randint(0, image_w - crop_pair[0])
h_offset = random.randint(0, image_h - crop_pair[1])
else:
w_offset, h_offset = _sample_fix_offset(
image_w, image_h, crop_pair[0], crop_pair[1], more_fix_crop
)
return crop_pair[0], crop_pair[1], w_offset, h_offset
def random_scale_and_resize(
images,
input_size,
scales=[1, 0.875, 0.75, 0.66],
max_distort=1,
fix_crop=True,
more_fix_crop=True,
interpolation=Image.BILINEAR,
):
if isinstance(images, torch.Tensor):
images = torch.split(images, 1, dim=0)
images = [Image.fromarray(b.squeeze(0).numpy()) for b in images]
im_size = images[0].size
crop_w, crop_h, offset_w, offset_h = _sample_crop_size(
im_size, scales, input_size, max_distort, fix_crop, more_fix_crop
)
crop_img_group = [
img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in images
]
ret_img_group = [
img.resize((input_size[0], input_size[1]), interpolation)
for img in crop_img_group
]
ret_img_group = torch.from_numpy(np.stack(ret_img_group, axis=0))
return ret_img_group
def random_short_side_scale_jitter(
images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
"""
Perform a spatial short scale jittering on the given images and
corresponding boxes.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
boxes (ndarray): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale, max_scale].
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
(ndarray or None): the scaled boxes with dimension of
`num boxes` x 4.
"""
if inverse_uniform_sampling:
size = int(
round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
)
else:
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images, boxes
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
if boxes is not None:
boxes = boxes * float(new_height) / height
else:
new_width = int(math.floor((float(width) / height) * size))
if boxes is not None:
boxes = boxes * float(new_width) / width
return (
torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode="bilinear",
align_corners=False,
),
boxes,
)
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
Args:
        boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
def random_crop(images, size, boxes=None):
"""
Perform random spatial crop on the given images and corresponding boxes.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
if images.shape[2] == size and images.shape[3] == size:
return images
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes
def horizontal_flip(prob, images, boxes=None):
"""
Perform horizontal flip on the given images and corresponding boxes.
Args:
        prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
flipped_boxes (ndarray or None): the flipped boxes with dimension of
`num boxes` x 4.
"""
if boxes is None:
flipped_boxes = None
else:
flipped_boxes = boxes.copy()
was_flipped = False
if np.random.uniform() < prob:
images = images.flip((-1))
was_flipped = True
width = images.shape[3]
if boxes is not None:
flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
return images, flipped_boxes, was_flipped
def uniform_crop(images, size, spatial_idx, boxes=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
height = images.shape[2]
width = images.shape[3]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes
def clip_boxes_to_image(boxes, height, width):
"""
Clip an array of boxes to an image with the given height and width.
Args:
boxes (ndarray): bounding boxes to perform clipping.
Dimension is `num boxes` x 4.
height (int): given image height.
width (int): given image width.
Returns:
clipped_boxes (ndarray): the clipped boxes with dimension of
`num boxes` x 4.
"""
clipped_boxes = boxes.copy()
clipped_boxes[:, [0, 2]] = np.minimum(
width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
)
clipped_boxes[:, [1, 3]] = np.minimum(
height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
)
return clipped_boxes
def blend(images1, images2, alpha):
"""
Blend two images with a given weight alpha.
Args:
images1 (tensor): the first images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
images2 (tensor): the second images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
alpha (float): the blending weight.
Returns:
(tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
return images1 * alpha + images2 * (1 - alpha)
def grayscale(images):
"""
Get the grayscale for the input images. The channels of images should be
in order BGR.
Args:
images (tensor): the input images for getting grayscale. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
img_gray (tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
# R -> 0.299, G -> 0.587, B -> 0.114.
img_gray = torch.tensor(images)
gray_channel = (
0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
)
img_gray[:, 0] = gray_channel
img_gray[:, 1] = gray_channel
img_gray[:, 2] = gray_channel
return img_gray
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
"""
    Perform color jittering on the input images. The channels of images
should be in order BGR.
Args:
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
img_brightness (float): jitter ratio for brightness.
img_contrast (float): jitter ratio for contrast.
img_saturation (float): jitter ratio for saturation.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
jitter = []
if img_brightness != 0:
jitter.append("brightness")
if img_contrast != 0:
jitter.append("contrast")
if img_saturation != 0:
jitter.append("saturation")
if len(jitter) > 0:
order = np.random.permutation(np.arange(len(jitter)))
for idx in range(0, len(jitter)):
if jitter[order[idx]] == "brightness":
images = brightness_jitter(img_brightness, images)
elif jitter[order[idx]] == "contrast":
images = contrast_jitter(img_contrast, images)
elif jitter[order[idx]] == "saturation":
images = saturation_jitter(img_saturation, images)
return images
def brightness_jitter(var, images):
"""
    Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for brightness.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_bright = torch.zeros(images.shape)
images = blend(images, img_bright, alpha)
return images
def contrast_jitter(var, images):
"""
    Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for contrast.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
images = blend(images, img_gray, alpha)
return images
def saturation_jitter(var, images):
"""
    Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for saturation.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
images = blend(images, img_gray, alpha)
return images
def lighting_jitter(images, alphastd, eigval, eigvec):
"""
Perform AlexNet-style PCA jitter on the given images.
Args:
images (tensor): images to perform lighting jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
alphastd (float): jitter ratio for PCA jitter.
eigval (list): eigenvalues for PCA jitter.
eigvec (list[list]): eigenvectors for PCA jitter.
Returns:
out_images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if alphastd == 0:
return images
# generate alpha1, alpha2, alpha3.
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1,
)
out_images = torch.zeros_like(images)
for idx in range(images.shape[1]):
out_images[:, idx] = images[:, idx] + rgb[2 - idx]
return out_images
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
assert len(mean) == images.shape[1], "channel mean not computed properly"
assert (
len(stddev) == images.shape[1]
), "channel stddev not computed properly"
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
return out_images
``` |
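A minimal, hedged sketch of how these transforms compose on a clip tensor; the import path mirrors the file location above, and the frame count and sizes are purely illustrative.
```python
# Hedged usage sketch: scale-jitter, random-crop, then maybe flip a fake clip.
import torch
from slowfast.datasets.transform import (
    horizontal_flip,
    random_crop,
    random_short_side_scale_jitter,
)

clip = torch.rand(8, 3, 240, 320)  # num frames x channel x height x width
clip, _ = random_short_side_scale_jitter(clip, min_size=256, max_size=320)
clip, _ = random_crop(clip, size=224)
clip, _, was_flipped = horizontal_flip(prob=0.5, images=clip)
print(clip.shape)  # torch.Size([8, 3, 224, 224])
```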
{
"source": "1aguna/polars",
"score": 3
} |
#### File: py-polars/polars/functions.py
```python
from datetime import datetime, timedelta
from typing import Optional, Sequence, Union, overload
import numpy as np
import polars as pl
try:
from polars.datatypes import py_type_to_dtype
from polars.polars import concat_df as _concat_df
from polars.polars import concat_lf as _concat_lf
from polars.polars import concat_series as _concat_series
from polars.polars import py_diag_concat_df as _diag_concat_df
_DOCUMENTING = False
except ImportError:
_DOCUMENTING = True
__all__ = ["get_dummies", "concat", "repeat", "arg_where", "date_range"]
def get_dummies(df: "pl.DataFrame") -> "pl.DataFrame":
"""
Convert categorical variables into dummy/indicator variables.
Parameters
----------
df
DataFrame to convert.
"""
return df.to_dummies()
@overload
def concat(
items: Sequence["pl.DataFrame"],
rechunk: bool = True,
how: str = "vertical",
) -> "pl.DataFrame":
...
@overload
def concat(
items: Sequence["pl.Series"],
rechunk: bool = True,
how: str = "vertical",
) -> "pl.Series":
...
def concat(
items: Union[
Sequence["pl.DataFrame"], Sequence["pl.Series"], Sequence["pl.LazyFrame"]
],
rechunk: bool = True,
how: str = "vertical",
) -> Union["pl.DataFrame", "pl.Series", "pl.LazyFrame"]:
"""
Aggregate all the Dataframes/Series in a List of DataFrames/Series to a single DataFrame/Series.
Parameters
----------
items
DataFrames/Series/LazyFrames to concatenate.
rechunk
rechunk the final DataFrame/Series.
how
Only used if the items are DataFrames.
On of {"vertical", "diagonal"}.
Vertical: Applies multiple `vstack` operations.
Diagonal: Finds a union between the column schemas and fills missing column values with null.
"""
if not len(items) > 0:
raise ValueError("cannot concat empty list")
out: Union["pl.Series", "pl.DataFrame", "pl.LazyFrame"]
if isinstance(items[0], pl.DataFrame):
if how == "vertical":
out = pl.wrap_df(_concat_df(items))
elif how == "diagonal":
out = pl.wrap_df(_diag_concat_df(items))
else:
raise ValueError(
f"how should be one of {'vertical', 'diagonal'}, got {how}"
)
elif isinstance(items[0], pl.LazyFrame):
return pl.wrap_ldf(_concat_lf(items, rechunk))
else:
out = pl.wrap_s(_concat_series(items))
if rechunk:
return out.rechunk() # type: ignore
return out
def repeat(
val: Union[int, float, str, bool], n: int, name: Optional[str] = None
) -> "pl.Series":
"""
Repeat a single value n times and collect into a Series.
Parameters
----------
val
Value to repeat.
n
Number of repeats.
name
Optional name of the Series.
"""
if name is None:
name = ""
dtype = py_type_to_dtype(type(val))
s = pl.Series._repeat(name, val, n, dtype)
return s
def arg_where(mask: "pl.Series") -> "pl.Series":
"""
Get index values where Boolean mask evaluate True.
Parameters
----------
mask
Boolean Series.
Returns
-------
UInt32 Series
"""
return mask.arg_true()
def date_range(
low: datetime,
high: datetime,
interval: timedelta,
closed: Optional[str] = None,
name: Optional[str] = None,
) -> pl.Series:
"""
Create a date range of type `Datetime`.
Parameters
----------
low
Lower bound of the date range
high
Upper bound of the date range
interval
Interval periods
closed {None, 'left', 'right'}
Make the interval closed to the 'left', 'right', or both sides (None, the default).
name
Name of the output Series
Returns
-------
A Series of type `Datetime`
Examples
--------
>>> import polars as pl
>>> from datetime import datetime, timedelta
>>> pl.date_range(datetime(1985, 1, 1), datetime(2015, 7, 1), timedelta(days=1, hours=12))
shape: (7426,)
Series: '' [datetime]
[
1985-01-01 00:00:00
1985-01-02 12:00:00
1985-01-04 00:00:00
1985-01-05 12:00:00
1985-01-07 00:00:00
1985-01-08 12:00:00
1985-01-10 00:00:00
1985-01-11 12:00:00
1985-01-13 00:00:00
1985-01-14 12:00:00
1985-01-16 00:00:00
1985-01-17 12:00:00
...
2015-06-14 00:00:00
2015-06-15 12:00:00
2015-06-17 00:00:00
2015-06-18 12:00:00
2015-06-20 00:00:00
2015-06-21 12:00:00
2015-06-23 00:00:00
2015-06-24 12:00:00
2015-06-26 00:00:00
2015-06-27 12:00:00
2015-06-29 00:00:00
2015-06-30 12:00:00
]
"""
values = np.arange(low, high, interval, dtype="datetime64[ms]")
if closed in (None, "right") and (high - low) % interval == timedelta(0):
values = np.append(values, np.array(high, dtype="datetime64[ms]"))
if closed == "right":
values = values[1:]
return pl.Series(name=name, values=values.astype(np.int64)).cast(pl.Datetime)
``` |
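For reference, a small hedged example of the diagonal concatenation mode documented above: the output keeps the union of both schemas and fills the missing columns with nulls.
```python
# Hedged sketch of pl.concat with how="diagonal".
import polars as pl

df1 = pl.DataFrame({"a": [1, 2], "b": ["x", "y"]})
df2 = pl.DataFrame({"a": [3], "c": [9.0]})
out = pl.concat([df1, df2], how="diagonal")
print(out.columns)  # ['a', 'b', 'c']
print(out.shape)    # (3, 3)
```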
{
"source": "1alexandra/collage",
"score": 3
} |
#### File: collage/src/textconfig.py
```python
import tkinter as tk
from tkinter.colorchooser import askcolor
from datetime import datetime
from src.fonts import get_system_fonts
from src.grid import grid_frame
class TextConfigureApp(tk.Frame):
"""Simple Collage Creator second window.
Used for adding a caption to a collage. Allows user to customize a
content and a style of the caption.
The window consists of five blocks:
- text redactor,
- font chooser,
- canvas with an intermediate result,
- font parameters input fields: italic, bold, underlined checkboxes \
and font size entry,
- buttons block: ``Change color...``, ``Try font``, ``OK`` buttons.
"""
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.rgb = (0, 0, 0)
self.text_redactor = None
self.font_chooser = None
self.system_fonts = get_system_fonts()
self.font = self.system_fonts[0]
self.font_size = tk.StringVar(self.master, '12')
self.italic_var = tk.IntVar(self.master)
self.bold_var = tk.IntVar(self.master)
self.lined_var = tk.IntVar(self.master)
self.create_widgets()
def create_widgets(self):
"""Create and grid all widgets."""
grid_frame(self.master, is_root=True)
grid_frame(self, [0], [0], 0, 0, 'news')
frame = tk.Frame(self, bd=10)
grid_frame(frame, [0, 1, 2], [0, 1], 0, 0, 'news')
self.create_text_redactor(frame, 0, 0)
self.create_font_chooser(frame, 0, 1)
self.create_canvas(frame, 1, 0)
self.create_modifiers(frame, 1, 1)
self.create_buttons(frame, 2, 1)
self.draw()
def create_canvas(self, frame, row, col):
"""Create, configure and grid result canvas."""
self.canvas = tk.Canvas(frame, width=300, height=100, bg='white')
self.canvas.grid(row=row, column=col)
def create_text_redactor(self, frame, row, col):
"""Create, grid and initialize text redactor."""
# TODO: add scrollbar
text_frame = tk.Frame(frame, bd=10)
grid_frame(text_frame, [1], [0], row, col, 'news')
label = tk.Label(text_frame, text="Type text here:", bd=10)
label.grid(row=0, column=0, sticky='s')
self.text_redactor = tk.Text(text_frame, width=45, height=15, wrap=tk.WORD)
self.text_redactor.grid(row=1, column=0, sticky='news')
self.text_redactor.insert(tk.END, datetime.now().date().strftime("%B %Y"))
def create_font_chooser(self, frame, row, col):
"""Create and grid font chooser listbox, fill the options."""
# TODO: add scrollbar
font_frame = tk.Frame(frame, bd=10)
grid_frame(font_frame, [1], [0], row, col, 'news')
label = tk.Label(font_frame, text="Select font:", bd=10)
label.grid(row=0, column=0, sticky='s')
        self.font_chooser = tk.Listbox(font_frame, selectmode=tk.SINGLE)
self.font_chooser.grid(row=1, column=0, sticky='news')
for item in self.system_fonts:
self.font_chooser.insert(tk.END, item)
self.font_chooser.selection_set(0)
def create_modifiers(self, frame, row, col):
"""Create and grid font modifiers block."""
# TODO: add validation function
buttons = tk.Frame(frame, bd=10)
grid_frame(buttons, [1], [0, 1, 2], row, col, 'news')
variables = {
'italic': self.italic_var,
'bold': self.bold_var,
'underlined': self.lined_var
}
for i, (text, variable) in enumerate(variables.items()):
check = tk.Checkbutton(buttons, text=text, variable=variable, onvalue=1, offvalue=0, bd=10)
check.grid(row=0, column=i, sticky='ne')
label = tk.Label(buttons, text="Font size:", padx=5)
label.grid(row=1, column=0, sticky='ne')
entry = tk.Entry(buttons, textvariable=self.font_size, width=30)
entry.grid(row=1, column=1, sticky='new', columnspan=2)
def create_buttons(self, frame, row, col):
"""Create and grid buttons block."""
buttons = tk.Frame(frame, bd=10)
grid_frame(buttons, [], [0, 1, 2], row, col, 'news')
commands = {
'Change color...': self.choose_color,
'Try font': self.choose_font,
'OK': self.ok_quit
}
for i, (text, command) in enumerate(commands.items()):
button = tk.Button(buttons, text=text, command=command, padx=5, pady=5, width=15)
button.grid(row=0, column=i, sticky='ews')
def draw(self):
"""Show intermediate result on the canvas."""
        # TODO: drawing text in chosen style on canvas
text = self.text_redactor.get('1.0', 'end-1c')
font = self.font
font_size = int(self.font_size.get())
rgb = self.rgb
is_italic = bool(self.italic_var.get())
is_bold = bool(self.bold_var.get())
is_lined = bool(self.lined_var.get())
print(text, font, font_size, rgb, is_italic, is_bold, is_lined)
pass
def choose_color(self):
"""Run askcolor dialog and show intermediate result."""
# ToDo: validation
self.rgb, _ = askcolor(parent=self, title="Choose color:")
self.draw()
def choose_font(self):
"""Update font and show intermediate result."""
self.font = self.system_fonts[self.font_chooser.curselection()[0]]
self.draw()
def ok_quit(self):
"""Update result canvas and close the window."""
self.draw()
self.master.destroy()
return 'break'
def get_return(self):
"""Return canvas with stylized capture."""
# TODO: check if canvas exists after self.master.destroy()
return self.canvas
```
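A hedged launch sketch, assuming the module is importable by its file path (`src/textconfig.py`) and run standalone; note the class's own TODO about the canvas surviving `master.destroy()`.
```python
# Hedged sketch: open the caption window and fetch the resulting canvas
# after the user presses OK (which destroys the master window).
import tkinter as tk
from src.textconfig import TextConfigureApp

root = tk.Tk()
app = TextConfigureApp(master=root)
app.mainloop()
canvas = app.get_return()  # see the TODO in get_return about widget lifetime
```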
#### File: collage/test/test_CollageTree.py
```python
from src.CollageTree import CollageRoot, ResizableLeaf
from src.Collage import Collage
from src.CollageImage import safe_open_image
from src.scroll import ScrolledFrame
import pytest
import os
import tkinter as tk
@pytest.fixture
def filename():
return os.path.join('test', 'files', 'kotya.jpg')
@pytest.fixture
def collage():
collage = Collage(0, 1, 1, 1, None, [], {'width': 30, 'height': 30})
return collage
@pytest.fixture
def collage_root(collage):
return collage.get_collage_root()
@pytest.fixture
def collage_image(filename, collage):
return safe_open_image(filename, collage.get_corners())
def test_init(collage_root):
assert collage_root.get_width() == 30
assert collage_root.get_height() == 30
def count_vertices(root):
res = 1
if root.get_left() is not None:
res += count_vertices(root.get_left())
if root.get_right() is not None:
res += count_vertices(root.get_right())
return res
def count_leafs(root):
res = int(type(root) == ResizableLeaf)
if root.get_left() is not None:
res += count_leafs(root.get_left())
if root.get_right() is not None:
res += count_leafs(root.get_right())
return res
def test_vertices_count_add(collage_root, collage_image):
sides = ['n', 'e', 'w', 's']
for i in range(1, 3 * len(sides) + 1):
cur_side = sides[i % len(sides)]
collage_root.add_image(image=collage_image, where=cur_side)
vertices = count_vertices(collage_root)
leafs = count_leafs(collage_root)
assert vertices == 2 * i
assert leafs == i
def test_vertices_count_remove(collage_root, collage_image):
def get_some_leaf(root):
if type(root) == ResizableLeaf:
return root
return get_some_leaf(root.get_left())
sides = ['n', 'e', 'w', 's']
for i in range(1, 3 * len(sides) + 1):
cur_side = sides[i % len(sides)]
collage_root.add_image(image=collage_image, where=cur_side)
for i in range(3 * len(sides) - 1, 0, -1):
leaf = get_some_leaf(collage_root)
leaf.destroy()
vertices = count_vertices(collage_root)
leafs = count_leafs(collage_root)
assert vertices == 2 * i
assert leafs == i
``` |
{
"source": "1am9trash/Othello_Tkinter_GUI",
"score": 3
} |
#### File: Othello_Tkinter_GUI/code/App.py
```python
import tkinter as tk
import tkinter.font as font
from functools import partial
from Othello import Othello
from Minimax import Minimax
class App:
color = ["green", "white", "black", "yellow"]
def __init__(self, board_size=8, player1=False, player2=False):
self.board_size = board_size
self.root = tk.Tk()
self.root.title("Othello")
self.root.geometry(str(50 * board_size + 200) + "x"
+ str(50 * board_size))
self.root.resizable(False, False)
self.board_frame = None
self.menu_frame = None
self.game = Othello(board_size)
self.players = [player1, player2]
self.render_first = False
def render(self):
if self.players[self.game.cur_turn - 1]:
if not self.render_first:
self.render_first = True
self.root.after(1000, self.render)
else:
if self.game.cant_move_or_end()[1] == 0:
agent = Minimax(self.game.cur_turn)
x, y, v = agent.minimax(self.game, -float("inf"), float("inf"),
4, True)
self.render_first = False
self.move(x, y)
self.draw_menu()
self.draw_board()
def restart(self, player1=False, player2=False):
self.game.reset(self.board_size)
self.players = [player1, player2]
self.render()
def draw_menu(self):
if self.menu_frame is not None:
self.menu_frame.destroy()
self.menu_frame = tk.Frame(self.root)
self.menu_frame.place(x=str(50 * self.board_size), y=0,
width=200, height=str(50 * self.board_size))
but = tk.Button(self.menu_frame, text="Start a new game",
font=font.Font(size=15), command=self.restart)
but.place(x=25, y=25, width=150, height=50)
but_ai = tk.Button(self.menu_frame, text="Start a new game\n with AI",
font=font.Font(size=15), command=partial(self.restart, False, True))
but_ai.place(x=25, y=100, width=150, height=75)
message = ""
empty, white, black, choice = self.game.get_status()
if empty + choice == 0 or white == 0 or black == 0:
if white > black:
message += "White wins\n"
elif white < black:
message += "Black wins\n"
else:
message += "Draw\n"
else:
if self.game.cur_turn == 1:
message += "White turn\n"
elif self.game.cur_turn == 2:
message += "Black turn\n"
message += \
"White count: " + str(white) + "\n" + \
"Black count: " + str(black) + "\n"
lab = tk.Label(self.menu_frame, text=message,
font=font.Font(size=20))
lab.place(x=25, y=200, width=150)
def move(self, x, y, event=None):
self.game.move(x, y)
self.render()
def draw_board(self):
if self.board_frame is not None:
self.board_frame.destroy()
self.board_frame = tk.Frame(self.root)
self.board_frame.place(x=0, y=0,
width=str(50 * self.board_size), height=str(50 * self.board_size))
for i in range(self.board_size):
for j in range(self.board_size):
cvs = tk.Canvas(self.board_frame, width=50, height=50,
bg="green", highlightthickness=1)
cvs.place(x=50*j, y=50*i)
if self.game.state[i][j] in [1, 2]:
cvs.create_oval(10, 10, 40, 40,
fill=App.color[self.game.state[i][j]],
width=0)
elif not self.players[self.game.cur_turn - 1] and self.game.state[i][j] == 3:
cvs.create_oval(20, 20, 30, 30,
fill=App.color[self.game.state[i][j]],
width=0)
cvs.bind("<Button-1>", partial(self.move, i, j))
``` |
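The file ends without an entry point; a hedged launch sketch (the import path assumes `code/App.py` is on `sys.path`) in which a human plays white against the minimax agent as black could look like this.
```python
# Hedged entry-point sketch for the Othello GUI.
from App import App

app = App(board_size=8, player1=False, player2=True)
app.render()
app.root.mainloop()
```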
{
"source": "1amG4bor/mlflow-for-model-improvement",
"score": 2
} |
#### File: src/helper/config.py
```python
import argparse
import logging
import time
from pathlib import Path
import socket
import yaml
BASE_PATH = Path(__file__).parents[2] # root path for the application
DIST_PATH = Path(BASE_PATH, 'DIST') # DIST/
LOG_PATH = Path(DIST_PATH, 'logs') # DIST/logs
MODELS_PATH = Path(DIST_PATH, 'models') # DIST/models/
EXPORTED_MODELS_PATH = Path(DIST_PATH, 'mlruns')
SRC_PATH = Path(BASE_PATH, 'src')
CONFIG_FILE = Path(__file__).parents[1].joinpath('config.yml')
DEFAULT_TRACKING_URI = f'http://{socket.gethostbyname("localhost")}/'
DEFAULT_EXPERIMENT_NAME = 'Test experiment'
logger = logging.getLogger("Config")
def timed(func):
""" Decorator to calculate the process time of a function (use: @timed)"""
def timing(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
end = time.perf_counter()
logger.info(f'Processing time({func.__name__}): {end - start:.5f} sec')
return result
return timing
def get_value(*keys):
""" Read config values from config.yaml """
with open(CONFIG_FILE, 'r') as stream:
try:
config_obj = yaml.safe_load(stream)
return _get_nested_value(config_obj, keys)
except yaml.YAMLError as err:
logger.error(err)
def _get_nested_value(cfg, keys):
if len(keys) > 1:
dict_value = cfg.get(keys[0])
return _get_nested_value(dict_value, keys[1:])
return cfg.get(keys[0])
def setup_arguments():
"""
    Parse the provided named arguments (or their defaults) to set up the variables needed to customize
    the training and evaluation process.
:returns object with attributes
"""
parser = argparse.ArgumentParser('Arguments to customize and handle the model and to configure the training and '
'evaluation')
# Arguments to specify the dataset
dataset_group = parser.add_argument_group(title='Arguments to specify the dataset source. One of them is REQUIRED!')
ds_group = dataset_group.add_mutually_exclusive_group()
ds_group.add_argument('-d', '--download', action="store_true",
help='Download the dataset and use it in model-training & evaluation. (default:False) The '
                               'data-source needs to be specified in the `config.yml` file')
ds_group.add_argument('-ds', '--dataset', type=str,
help="Name of the dataset in '~/.keras/datasets' folder to use in training/validation/test")
ds_extra = dataset_group.add_argument_group('Other dataset specific options')
ds_extra.add_argument('-sub', '--subset', type=str, nargs='+', metavar=('dataset-name', 'SAMPLE-CLASS'),
help='''Create a subset from the original dataset and use it for the training. Arguments
are list of strings where the first item will be the name of the new dataset. eg: ".. -sub
new-dataset stop give-way"''')
# Arguments related to the model
model_group = parser.add_argument_group('Arguments for the model related options')
model_group.add_argument('-n', '--name', type=str, required=True,
help='Name for loading/saving the model')
model_group.add_argument('-c', '--create', action="store_true",
help='Create & train the model with the given name. If this flag is not set, then the '
'previously saved model will be loaded if it exists with the given name.')
# Arguments to configure the training process
process_group = parser.add_argument_group('Arguments configure the training process')
process_group.add_argument('-r', '--run-name', type=str,
                               help='Name for the experiment run and for the deployed model, if its '
                                    'accuracy reaches the required value (default: 80 percent).')
process_group.add_argument('-b', '--batch', type=int, default=64,
help='Batch size for training the model. (default:64)')
process_group.add_argument('-ep', '--epoch', type=int, default=20,
help='Epoch size for training the model. (default:20)')
process_group.add_argument('-ts', '--test-split', type=int, default=20,
help='Percentage of test dataset size relative to the total dataset. (default:20)')
process_group.add_argument('-vs', '--validation-split', type=int, default=20,
help='Percentage of validation dataset size relative to the train dataset. (default:20)')
process_group.add_argument('-dl', '--deploy-limit', type=int, default=80,
help='Value of the minimum accuracy that triggers a model deployment.')
args = parser.parse_args()
if not args.dataset and not args.download:
        logger.error('Error! No data source has been provided; the `download` or `dataset` argument is required!')
        raise ValueError(
            "No data source was provided, so the script stopped. Please check the specified CLI arguments.")
if args.subset and len(args.subset) > 1:
args.subset_name = args.subset.pop(0)
else:
args.sub = args.subset_name = None
args.test_split /= 100
args.validation_split /= 100
return args
```
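`get_value` simply walks nested keys of `src/config.yml`; below is a hedged example of that lookup. The YAML keys shown are illustrative, but `model.save_format` is the lookup actually performed in `recognition_model.py`.
```python
# Hedged sketch of the nested lookup done by get_value().
# Given a config.yml containing:
#   model:
#     save_format: tf
# the call below returns 'tf'.
from helper import config

save_format = config.get_value('model', 'save_format')
```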
#### File: src/model/recognition_model.py
```python
import logging
from pathlib import Path
from shutil import rmtree, copy
from typing import Tuple, List
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers
import mlflow.keras
from helper import config as cfg
from helper.config import LOG_PATH
from model.props import ConvolutionProps, DenseProps
logger = logging.getLogger('recognition_model')
DEFAULT_LOG_FOLDER = LOG_PATH
class RecognitionModel(models.Sequential):
"""Traffic sign recognition model
:parameter input_shape: input shape of the model
:parameter convolution_props: properties of convolutional layers
:parameter dense_props: properties of dense layers
:parameter name: name of the model
:parameter log_folder: location for logs
"""
def __init__(self,
input_shape: Tuple[int, int, int],
convolution_props: ConvolutionProps = None,
dense_props: DenseProps = None,
name: str = 'RecognitionModel',
log_folder: str = DEFAULT_LOG_FOLDER,
):
super().__init__(name=name)
self.feature_columns = input_shape
self.convolution_props = convolution_props
self.dense_props = dense_props
self.log_folder = log_folder
if input_shape and convolution_props and dense_props:
self.__build_layer_structure(input_shape, convolution_props, dense_props)
for layer in self.layer_list:
self.add(layer)
@classmethod
def from_config(cls, config, custom_objects=None):
input_shape = config['input_shape']
# Create the model (without layers)
model = cls(input_shape=input_shape,
convolution_props=None,
dense_props=None,
name=config['name'],
log_folder=config['log_folder'])
# Load the layers
layer_configs = config['layers']
for layer_config in layer_configs:
layer = layers.deserialize(layer_config)
model.add(layer)
# Build
build_input_shape = config.get('build_input_shape') or None
if not model.inputs and build_input_shape and isinstance(build_input_shape, (tuple, list)):
model.build_model(log_summary=False)
return model
def get_config(self):
return {
'input_shape': self.feature_columns,
'convolution_props': self.convolution_props.get_config(),
'dense_props': self.dense_props.get_config(),
'name': self.name,
'log_folder': self.log_folder,
'layers': self.layers,
'build_input_shape': self.input_shape,
}
def __build_layer_structure(self, input_shape, convolution_props, dense_props):
structure = []
# Create the convolutional layers
for size in convolution_props.layers:
structure.append(layers.Conv2D(filters=size,
kernel_size=convolution_props.kernel_size,
padding='same',
activation=convolution_props.activation))
structure.append(layers.MaxPooling2D())
# Flatter
structure.append(layers.Flatten())
# Create the dense layers
for size in dense_props.layers:
structure.append(layers.Dense(units=size, activation=dense_props.activation))
structure.append(layers.Dense(units=dense_props.final_layer, activation='softmax'))
self.layer_list = structure
def compile_model(self,
optimizer: str or tf.keras.optimizers,
loss_fn: str or tf.keras.losses.Loss,
metrics: List[str or tf.keras.metrics.Metric]):
self.compile(optimizer, loss_fn, metrics)
loss_fn_name = loss_fn if isinstance(loss_fn, str) else loss_fn.name
mlflow.log_params({
'HP_optimizer': optimizer,
'HP_loss_fn': loss_fn_name
})
def build_model(self, log_summary=True):
shape = [0, *self.feature_columns]
self.build(shape)
if log_summary:
logger.info(f'Model summary:\n{self.summary()}')
def evaluate_accuracy(self, test_input):
_, accuracy = self.evaluate(test_input)
return accuracy
def train_model(self, samples, labels, validation_split, epochs):
self.fit(
x=samples,
y=labels,
validation_split=validation_split,
epochs=epochs,
callbacks=[
tf.keras.callbacks.TensorBoard(self.log_folder), # log metrics
],
)
def save_model(self, model_folder, file_to_save=None):
if not file_to_save:
file_to_save = self.name
save_format = cfg.get_value('model', 'save_format')
save_format = save_format if save_format in ['tf', 'h5'] else None
if save_format:
suffix = Path(file_to_save).suffix
ext = f'.{save_format}'
if save_format == 'h5':
file_to_save = file_to_save + ext if suffix != ext else file_to_save
else:
file_to_save = Path(file_to_save).stem if suffix == ext else file_to_save
destination = Path(model_folder, file_to_save)
""" Save the model in different forms and to different places """
# 1). Save keras model locally (tf or h5)
models.save_model(self, destination, save_format=save_format, overwrite=True)
# 2). Save MLflow model locally (with data, MLmodel, conda.yaml, etc)
mlflow_model_destination = Path(destination.parent, 'mlflow-models', Path(file_to_save).stem)
if mlflow_model_destination.exists():
rmtree(mlflow_model_destination)
env = mlflow.keras.get_default_conda_env()
mlflow.keras.save_model(self, mlflow_model_destination, env, custom_objects=self.custom_object(),
keras_module='tensorflow.keras')
# Log to MLflow
# 3). Log keras model as an artifact (should be dir)
tmp = Path(destination.parent, 'keras-model')
tmp.mkdir()
copy(destination, tmp)
mlflow.log_artifacts(tmp, 'keras-model')
rmtree(tmp)
# 4). Log mlflow-model as an artifact to MLflow
mlflow.log_artifacts(mlflow_model_destination, 'mlflow-model-artifact')
# 5). Log mlflow-model as a model to MLflow
mlflow.keras.log_model(self, artifact_path='log_model', conda_env=env, custom_objects=self.custom_object(),
keras_module='tensorflow.keras')
return destination
def predict_one(self, sample):
return self.predict_classes(x=sample, batch_size=1, verbose=1)
def predict_many(self, samples):
result = np.argmax(self.predict(samples), axis=-1)
return result
@staticmethod
def load_saved_model(model_location):
""" Returns a Keras model instance that will be compiled if it was saved that way, otherwise need to compile
:parameter model_location: destination of the saved model, it could be: `str`, `pathlib.Path`, `h5py.File`
"""
return models.load_model(model_location, custom_objects=RecognitionModel.custom_object())
@staticmethod
def separate_features_and_labels(dataset):
features = []
labels = []
for sample in dataset.as_numpy_iterator():
features.extend(sample[0])
labels.extend(sample[1])
return features, labels
@staticmethod
def custom_object():
""" Returns a dictionary mapping names (strings) to custom class that used by Keras to save/load models """
return {'RecognitionModel': RecognitionModel}
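# Usage sketch (not part of the original file; conv_props/dense_props are hypothetical instances
# of ConvolutionProps/DenseProps from model.props, and the other values are illustrative):
#
#     model = RecognitionModel((128, 128, 1), conv_props, dense_props, name='TrafficSigns')
#     model.build_model()
#     clone = RecognitionModel.from_config(model.get_config())   # intended config round-trip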
```
#### File: mlflow-for-model-improvement/src/train_eval.py
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Set TensorFlow log-level
import logging
import mlflow.keras
from helper import config
from service import data_service, model_service
from helper.config import DEFAULT_TRACKING_URI, DEFAULT_EXPERIMENT_NAME
log_format = '%(asctime)s >>%(levelname)s<< %(filename)s|%(funcName)s: ln.%(lineno)d => %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('Train_predict')
def init():
""" Initialize parameters and MLflow
Params will set the corresponding MLflow parameters
"""
logger.info('Initialization..')
ARGS = config.setup_arguments()
img_height = img_width = 128
input_shape = (img_height, img_width, 1)
return ARGS, input_shape
def prepare_data(ARGS, input_shape):
""" Data Augmentation Ingestion & Segregation
- Data Ingestion: gather only the data that needs to be fed into the pipeline
- Data Preparation: assume that the dataset is already prepared, ready-to-use
(No internal step to analyze and/or modify the dataset)
- Data Segregation: Split the dataset into subsets of training-set and testing-set
Validation-set will be separated from the training-dataset (80/20) just before training
"""
batch = ARGS.batch
dataset_path = data_service.data_sourcing(ARGS)
train_data, test_data, class_labels = data_service.data_segregation(
dataset_path, input_shape, batch, ARGS.test_split)
logger.info(f'Classes of the dataset: ({len(class_labels)}) => {class_labels}')
return train_data, test_data, class_labels
if __name__ == '__main__':
RUN_ARGS, feature_shape = init()
# Data Extraction
train_ds, test_ds, labels = prepare_data(RUN_ARGS, feature_shape)
# Modelling
model_name = RUN_ARGS.name
if RUN_ARGS.create:
# Run with MLflow
run_name = RUN_ARGS.run_name
tracking_uri = config.get_value('mlflow', 'tracking_uri') or DEFAULT_TRACKING_URI
experiment_name = config.get_value('mlflow', 'experiment_name') or DEFAULT_EXPERIMENT_NAME
mlflow.set_tracking_uri(tracking_uri)
mlflow.set_experiment(experiment_name)
mlflow.tensorflow.autolog()
with mlflow.start_run(run_name=run_name) as run:
run_id = run.info.run_id
mlflow.set_tag('experiment_id', run.info.experiment_id)
mlflow.set_tag('run_id', run_id)
mlflow.log_param('input-shape', feature_shape)
params_to_log = ['name', 'subset_name', 'dataset', 'epoch', 'batch', 'test_split', 'validation_split']
params = {i: vars(RUN_ARGS).get(i) for i in params_to_log}
mlflow.log_params({
'cfg_model_name': params.get('subset_name') or params.get('name'),
'cfg_dataset_name': params.get('dataset'),
'cfg_labels': labels,
'HP_epochs': params.get('epoch'),
'HP_batch_size': params.get('batch'),
'HP_test_split': params.get('test_split'),
'HP_validation_split': params.get('validation_split'),
})
# Create, train, and save model
model = model_service.create(model_name, feature_shape, labels)
model_service.train(model, train_ds, RUN_ARGS.epoch, RUN_ARGS.validation_split)
model_service.save(model, model_name)
# Validation
model_service.evaluate_model(model, test_ds)
stat, cumulative_accuracy = model_service.validate_classification(model, test_ds, labels, False)
for key, value in stat.items():
acc = round(value['right'] / (value['right'] + value['wrong']) * 100, 1)
mlflow.log_param(f'accuracy.{key}', acc)
mlflow.log_param(f'stat.{key}', value)
# Register the model
if cumulative_accuracy >= RUN_ARGS.deploy_limit:
logger.info(f"The '{model_name}' model with runId of '{run_id}' and '{cumulative_accuracy}' accuracy "
f"is registered to the model-registry as '{run_name}'.")
mlflow.register_model(
model_uri=f'runs:/{run_id}/model',
name=run_name
)
logger.info(f'The run has been finished, check: `{tracking_uri}` for the result and for more information!')
else:
# Load model
model = model_service.load(model_name)
# Validation
model_service.evaluate_model(model, test_ds)
model_service.validate_classification(model, test_ds, labels, False)
``` |
{
"source": "1amG4bor/PySnake",
"score": 4
} |
#### File: PySnake/view/gfx.py
```python
import pygame
from config import Colors
import pygame.gfxdraw as draw
canvas = None
default_font = 'BreeSerif-Regular.ttf'
def set_screen(screen):
global canvas
canvas = screen
def draw_line(line_position, line_size, color=Colors.red, thick=2):
"""Draw a line"""
pygame.draw.line(canvas, color, line_position, (line_position[0] + line_size, line_position[1]), thick)
def draw_text(text, position, color=Colors.white, size=18, font_name=default_font):
"""Draw a configured text element to a specific place"""
text_font = pygame.font.Font(f'./font/{font_name}', size)
rendered_text = text_font.render(text, False, color)
canvas.blit(rendered_text, position)
def draw_panel(position, size, border_color, background):
x, y, w, h = position[0], position[1], size[0], size[1]
pygame.draw.rect(canvas, border_color, (x, y, w, h), 2)
draw.box(canvas, [x + 2, y + 2, w - 3, h - 3], background)
def draw_progress_bar(position, size, level, values, colors=(Colors.dark_grey, Colors.grey, Colors.green)):
"""
Draw a progress bar with the specified parameters
Show how close the current level is to completion
:param position: where to render (x, y coordinate of top-left position)
:param size: of the bar (width, height)
:param level: current level
:param values: current and maximum value (base of the percentage)
:param colors: colors of the elements (border, panel background, progress fill)
"""
# Panel-border
draw_panel(position, size, colors[0], colors[1])
fill = int(size[0] * (values[0] / values[1]))
x, y, w, h = position[0]+3, position[1]+3, fill, size[1]-6
draw_panel((x, y), (w, h), colors[2], colors[2])
draw_text(f'Level: {level}', (position[0] + int(size[0]/3), position[1]+10), size=21)
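# Usage sketch (not part of the original file; coordinates and values are illustrative):
#     draw_progress_bar((20, 400), (180, 50), level=3, values=(40, 100))
# draws a bar that is 40% filled and labelled "Level: 3".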
```
#### File: PySnake/view/renderer.py
```python
from typing import List
import pygame
import pygame.gfxdraw as draw
from config import Colors, Config
from model.Direction import Direction
from model.Player import Body
from view import gfx
pygame.init()
pygame.font.init()
title_font = pygame.font.Font('./font/SquadaOne-Regular.ttf', 52)
mainFont = pygame.font.SysFont('comic', 42)
paragraphFont = pygame.font.Font('./font/BreeSerif-Regular.ttf', 24)
size = Config.cell_size
apple = pygame.transform.scale(pygame.image.load('./images/apple.png'), (size - 2, size - 2))
logo = pygame.transform.scale(pygame.image.load('./images/logo.jpg'), (36, 36))
def render_sidebar(screen, time, game):
"""Render the UI elements to the screen"""
gfx.set_screen(screen)
grid = Config.grid
# Game Logo & Title
# Logo
position = (grid['col'][0], grid['row'][0]+10)
gfx.draw_panel(position, (40, 40), Colors.red, Colors.white)
screen.blit(logo, (shifted_position(position, 2)))
left = grid['col'][2]
position = (left+2, grid['row'][0])
# Title
gfx.draw_text('PySnake', position, Colors.red, 52, 'SquadaOne-Regular.ttf')
gfx.draw_line((left, grid['row'][1]), 165)
left = grid['col'][1]
padding = 10
# Username
position = (left, grid['row'][2])
gfx.draw_panel(position, (180, 50), Colors.neon_green, Colors.green2)
gfx.draw_text("Dummy the Player", shifted_position(position, padding))
padding = 6
# Elapsed time
position = (left, grid['row'][3])
gfx.draw_panel(position, (180, 40), Colors.dark_grey, Colors.grey)
gfx.draw_text(_format_time(time), shifted_position(position, 12, padding))
# Actual score
position = (left, grid['row'][4])
gfx.draw_panel(position, (180, 40), Colors.dark_grey, Colors.grey)
gfx.draw_text(f'Score: {game.score:,}', shifted_position(position, 12, padding))
# Progress-bar (Level)
position = (left, grid['row'][5])
values = game.score - game.prev_limit, game.level_limit - game.prev_limit
width = grid['col'][3] - grid['col'][1]
gfx.draw_progress_bar(position, (width, 50), game.level, values)
# Highest score
position = (left, grid['row'][6])
gfx.draw_line((position[0], position[1]-20), 180, Colors.white, 1)
gfx.draw_panel(position, (180, 40), Colors.dark_grey, Colors.orange)
gfx.draw_text(f'Top score: {game.top_score:,}', shifted_position(position, 8, padding), Colors.black)
def render_board(screen, game):
dim = Config.board_position + Config.board_size
pygame.draw.rect(screen, Colors.dark_grey, dim)
pygame.draw.rect(screen, Colors.grey, dim, 3) # Border
_draw_grid(screen, game)
def _draw_grid(screen, game):
start_off = [i + 2 for i in Config.board_position]
rect = Config.cell_size
food_location = (game.board.get_food_location())
for y in range(Config.table_size):
dy = start_off[1] + y * rect
for x in range(Config.table_size):
dx = start_off[0] + x * rect
if food_location == (x, y):
screen.blit(apple, (dx + 1, dy + 1))
else:
pygame.draw.rect(screen, Colors.deep_grey, (dx, dy, rect - 1, rect - 1))
# draw snake
head = game.player.head
body: List[Body] = game.player.body
if x == head.x and y == head.y:
pygame.draw.rect(screen, Colors.green, (dx + 1, dy + 1, rect - 2, rect - 2))
# Half and quarter size (1/4) of the cell
half, q1 = Config.cell_size // 2, Config.cell_size // 4
if game.player.direction == Direction.UP or game.player.direction == Direction.DOWN:
draw.filled_circle(screen, dx + q1, dy + half, 1, Colors.red)
draw.filled_circle(screen, dx + half + q1, dy + half, 1, Colors.red)
else:
draw.filled_circle(screen, dx + half, dy + q1, 1, Colors.red)
draw.filled_circle(screen, dx + half, dy + half + q1, 1, Colors.red)
for i, part in enumerate(body):
if x == part.location.x and y == part.location.y:
# Striped color pattern for the snake
color = Colors.deep_dark_green if (i % 4 == 0 and i != 0) else Colors.dark_green
draw.box(screen, (dx + 1, dy + 1, rect - 2, rect - 2), color)
# Private functions
def _format_time(time):
minutes = time // 60
seconds = time % 60
return f"Time: {minutes:>02}:{seconds:>02}"
def shifted_position(position, x_padding, y_padding=None):
if not y_padding:
y_padding = x_padding
return position[0] + x_padding, position[1] + y_padding
``` |
{
"source": "1amn0body/ImageGetter_APOD-xkcd",
"score": 3
} |
#### File: 1amn0body/ImageGetter_APOD-xkcd/configCreator.py
```python
import configparser, os
osSep = os.path.sep
currDir = os.getcwd()
def readConfig(cfgName="imageGetter-cfg.ini"):
if os.path.exists(cfgName) and os.path.isfile(cfgName):
cfg = configparser.ConfigParser()
cfg.read(cfgName)
path = cfg['SAVEPATH']['path']
if not os.path.exists(path) or os.path.isfile(path):
try:
os.mkdir(path)
except Exception as e:
print("Tried to re-create save directory but failed.")
clearImages = cfg['IMAGES'].getboolean('clear_images')
max_APOD = int(cfg['IMAGES']['max_apod'])
max_xkcd = int(cfg['IMAGES']['max_xkcd'])
savedImages = cfg['SAVED_IMAGES']['images']
if len(savedImages) > 0: savedImages = savedImages.split(',')
else: savedImages = []
# Remove previously saved images if clear_images is True
if len(savedImages) > 0 and clearImages:
updateSavedImages(None, True)
return path, max_APOD, max_xkcd, cfgName
else:
createConfig(cfgName)
return readConfig()
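# Example of the INI layout this module reads and writes (values are illustrative):
#
#   [SAVEPATH]
#   path = /home/user/imgs
#
#   [IMAGES]
#   clear_images = yes
#   max_apod = 3
#   max_xkcd = 3
#
#   [SAVED_IMAGES]
#   images = apod_today.jpg,xkcd_2543.png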
def updateSavedImages(saveImage, clear=False, cfgName="imageGetter-cfg.ini"):
cfg = configparser.ConfigParser()
cfg.read(cfgName)
path = cfg['SAVEPATH']['path']
savedImages = cfg['SAVED_IMAGES']['images']
if len(savedImages) > 0: savedImages = savedImages.split(',')
else: savedImages = []
if not clear:
savedImages += saveImage
cfg['SAVED_IMAGES'] = {'images': ','.join(savedImages)}
else:
for i in savedImages:
if len(i) > 0 and os.path.isfile(path + osSep + i):
try:
os.remove(path + osSep + i)
except Exception as e:
print("File could not be deleted.")
# clear imagelist
cfg['SAVED_IMAGES'] = {'images': ''}
with open(cfgName, 'wt+') as cfgFile:
cfg.write(cfgFile)
def createConfig(cfgName="imageGetter-cfg.ini"):
print("Setting up your config now:")
while True: # savepath
path = input("Savepath for downloaded images: ").replace("/", osSep).replace("\\", osSep).replace("\n", '')
if not os.path.isabs(path): # is absolute
if (path != "") and (path != osSep) and (path is not None):
if path[0] != osSep:
path = os.path.join(currDir + osSep + path)
else:
path = os.path.join(currDir + path)
else:
path = os.path.join(currDir + osSep + "imgs")
if not os.path.exists(path):
#make dir
try:
os.mkdir(path)
print("Created directory:", path)
print()
break
except Exception as e:
print("Something went wrong, please try again.")
continue
elif os.path.exists(path) and os.path.isfile(path):
print("Your path is a file, please use a directory instead.")
continue
print("Validated that directory exists:", path)
print()
break
while True: # clearImages
try:
clearImages = input("Clear saved images everytime at execution? (y/n): ")[0].lower()
if clearImages == 'y' or clearImages == 'n':
if clearImages == 'y': clearImages = "yes"
else: clearImages = "no"
print()
break
print("That was some other character, please try again.")
continue
except Exception as e:
print("Something went wrong, please try again.")
continue
while True: # apodnum
try:
apodnum = int(input("Maximal image count for Astronomy Picture Of the Day (APOD): "))
if apodnum < 0:
apodnum = 0
print()
break
except Exception as e:
print("That was not a valid number. Try again...")
continue
while True: # xkcdnum
try:
xkcdnum = int(input("Maximal image count for xkcd-Comics: "))
if xkcdnum < 0:
xkcdnum = 0
print()
break
except Exception as e:
print("That was not a valid number. Try again...")
continue
cfg = configparser.ConfigParser() # create config object
cfg['SAVEPATH'] = {'path': path}
cfg['IMAGES'] = {
'clear_images': clearImages,
'max_apod': apodnum,
'max_xkcd': xkcdnum
}
cfg['SAVED_IMAGES'] = {'images': ''}
with open(cfgName, "wt") as cfgFile:
cfg.write(cfgFile)
print("Created config file:", cfgName)
print()
``` |
{
"source": "1amn0body/ImageScraper",
"score": 3
} |
#### File: ImageScraper/img_dl/xkcd.py
```python
from img_dl.dl_img import DownloadImages
import requests
from bs4 import BeautifulSoup
class XKCD(DownloadImages):
base_url = "https://xkcd.com/"
def save_image(self, soup: BeautifulSoup) -> None:
img_s = soup.find(id='comic').findAll('img')
for img in img_s:
super(XKCD, self).save_image('https:' + img['src'])
# TODO bring comment to image...
# img['title']
def get_latest_image(self) -> int:
try:
soup = BeautifulSoup(requests.get(self.base_url).text, 'html.parser')
self.save_image(soup)
prev_link = soup.find('ul', {'class': ['comicNav']}).find('a', {'rel': ['prev']})['href']
return int(prev_link.replace('/', ''))
except requests.RequestException:
print(f"Error requesting '{self.base_url}'.")
except Exception as e:
print("An error occurred.")
print(f"Details:\n{e}")
return 0
def get_image(self, position: int) -> None:
url = self.base_url + str(position)
try:
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
self.save_image(soup)
except requests.RequestException:
print(f"Error requesting '{url}'.")
except Exception as e:
print("An error occurred.")
print(f"Details:\n{e}")
def get_from_count(self) -> None:
if self.count > 0:
pos_now = self.get_latest_image()
i = 0
while i < self.count - 1 and pos_now > 0:
pos = pos_now - i
if pos > 0:
self.get_image(pos_now - i)
else:
break
i += 1
``` |
{
"source": "1AmNegan/electron",
"score": 2
} |
#### File: electron/build/zip.py
```python
import os
import subprocess
import sys
import zipfile
LINUX_BINARIES_TO_STRIP = [
'electron',
'libffmpeg.so',
'libnode.so'
]
def strip_binaries(target_cpu, dep):
for binary in LINUX_BINARIES_TO_STRIP:
if dep.endswith(binary):
strip_binary(dep, target_cpu)
def strip_binary(binary_path, target_cpu):
if target_cpu == 'arm':
strip = 'arm-linux-gnueabihf-strip'
elif target_cpu == 'arm64':
strip = 'aarch64-linux-gnu-strip'
elif target_cpu == 'mips64el':
strip = 'mips64el-redhat-linux-strip'
else:
strip = 'strip'
execute([strip, binary_path])
def execute(argv):
try:
output = subprocess.check_output(argv, stderr=subprocess.STDOUT)
return output
except subprocess.CalledProcessError as e:
print(e.output)
raise e
def main(argv):
dist_zip, runtime_deps, target_cpu, target_os = argv
dist_files = []
with open(runtime_deps) as f:
for dep in f.readlines():
dep = dep.strip()
dist_files += [dep]
if sys.platform == 'darwin':
mac_zip_results = execute(['zip', '-r', '-y', dist_zip] + dist_files)
else:
with zipfile.ZipFile(dist_zip, 'w', zipfile.ZIP_DEFLATED) as z:
for dep in dist_files:
if target_os == 'linux':
strip_binaries(target_cpu, dep)
if os.path.isdir(dep):
for root, dirs, files in os.walk(dep):
for file in files:
z.write(os.path.join(root, file))
else:
z.write(dep)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
#### File: script/lib/env_util.py
```python
from __future__ import print_function
import itertools
import os
import subprocess
import sys
def validate_pair(ob):
if not (len(ob) == 2):
print("Unexpected result:", ob, file=sys.stderr)
return False
else:
return True
def consume(iter):
try:
while True: next(iter)
except StopIteration:
pass
def get_environment_from_batch_command(env_cmd, initial=None):
"""
Take a command (either a single command or list of arguments)
and return the environment created after running that command.
Note that the command must be a batch file or .cmd file, or the
changes to the environment will not be captured.
If initial is supplied, it is used as the initial environment passed
to the child process.
"""
if not isinstance(env_cmd, (list, tuple)):
env_cmd = [env_cmd]
# Construct the command that will alter the environment.
env_cmd = subprocess.list2cmdline(env_cmd)
# Create a tag so we can tell in the output when the proc is done.
tag = 'END OF BATCH COMMAND'
# Construct a cmd.exe command to accomplish this.
cmd = 'cmd.exe /s /c "{env_cmd} && echo "{tag}" && set"'.format(**vars())
# Launch the process.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=initial)
# Parse the output sent to stdout.
lines = proc.stdout
# Consume whatever output occurs until the tag is reached.
consume(itertools.takewhile(lambda l: tag not in l, lines))
# Define a way to handle each KEY=VALUE line.
handle_line = lambda l: l.rstrip().split('=',1)
# Parse key/values into pairs.
pairs = map(handle_line, lines)
# Make sure the pairs are valid.
valid_pairs = filter(validate_pair, pairs)
# Construct a dictionary of the pairs.
result = dict(valid_pairs)
# Let the process finish.
proc.communicate()
return result
def get_vs_location(vs_version):
"""
Returns the location of the VS building environment.
The vs_version can be strings like "[15.0,16.0)", meaning 2017, but not the next version.
"""
# vswhere can't handle spaces, like "[15.0, 16.0)" should become "[15.0,16.0)"
vs_version = vs_version.replace(" ", "")
program_files = os.environ.get('ProgramFiles(x86)')
# Find visual studio
proc = subprocess.Popen(
program_files + "\\Microsoft Visual Studio\\Installer\\vswhere.exe "
"-property installationPath "
"-requires Microsoft.VisualStudio.Component.VC.CoreIde "
"-format value "
"-version {0}".format(vs_version),
stdout=subprocess.PIPE)
location = proc.stdout.readline().rstrip()
return location
def get_vs_env(vs_version, arch):
"""
Returns the env object for VS building environment.
vs_version is the version of Visual Studio to use. See get_vs_location for
more details.
The arch has to be one of "x86", "amd64", "arm", "x86_amd64", "x86_arm", "amd64_x86",
"amd64_arm", i.e. the args passed to vcvarsall.bat.
"""
location = get_vs_location(vs_version)
# Launch the process.
vsvarsall = "{0}\\VC\\Auxiliary\\Build\\vcvarsall.bat".format(location)
return get_environment_from_batch_command([vsvarsall, arch])
```
#### File: electron/script/serve-node-headers.py
```python
import argparse
import atexit
import os
import shutil
import sys
import tarfile
import time
from subprocess import Popen, PIPE
from lib.util import execute_stdout
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
def main():
args = parse_args()
header_dir = os.path.join(DIST_DIR, args.version)
# Generate Headers
script_path = os.path.join(SOURCE_ROOT, 'script', 'create-node-headers.py')
execute_stdout([sys.executable, script_path, '--version', args.version,
'--directory', header_dir])
# Launch server
script_path = os.path.join(SOURCE_ROOT, 'node_modules', 'serve', 'bin',
'serve.js')
server = Popen(['node', script_path, '--port=' + args.port], stdout=PIPE,
cwd=DIST_DIR)
def cleanup():
server.kill()
atexit.register(cleanup)
time.sleep(1)
# Generate Checksums
script_path = os.path.join(SOURCE_ROOT, 'script', 'upload-node-checksums.py')
execute_stdout([sys.executable, script_path, '--version', args.version,
'--dist-url', 'http://localhost:' + args.port,
'--target-dir', header_dir])
print("Point your npm config at 'http://localhost:" + args.port + "'")
server.wait()
def parse_args():
parser = argparse.ArgumentParser(description='create node header tarballs')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
parser.add_argument('-p', '--port', help='Specify port to run local server',
default='4321')
return parser.parse_args()
if __name__ == '__main__':
sys.exit(main())
```
#### File: electron/script/upload-index-json.py
```python
import os
import sys
import urllib2
from lib.config import s3_config
from lib.util import s3put, scoped_cwd, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'D')
BASE_URL = 'https://electron-metadumper.herokuapp.com/?version='
version = sys.argv[1]
authToken = os.getenv('META_DUMPER_AUTH_HEADER')
def get_content(retry_count = 5):
try:
request = urllib2.Request(
BASE_URL + version,
headers={"Authorization" : authToken}
)
return urllib2.urlopen(
request
).read()
except Exception as e:
if retry_count == 0:
raise e
return get_content(retry_count - 1)
def main():
if not authToken or authToken == "":
raise Exception("Please set META_DUMPER_AUTH_HEADER")
# Upload the index.json.
with scoped_cwd(SOURCE_ROOT):
safe_mkdir(OUT_DIR)
index_json = os.path.relpath(os.path.join(OUT_DIR, 'index.json'))
new_content = get_content()
with open(index_json, "w") as f:
f.write(new_content)
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, OUT_DIR, 'atom-shell/dist',
[index_json])
if __name__ == '__main__':
sys.exit(main())
```
#### File: electron/script/upload-node-headers.py
```python
import argparse
import glob
import os
import shutil
import sys
from lib.config import PLATFORM, get_target_arch, s3_config
from lib.util import safe_mkdir, scoped_cwd, s3put
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
def main():
args = parse_args()
# Upload node's headers to S3.
bucket, access_key, secret_key = s3_config()
upload_node(bucket, access_key, secret_key, args.version)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def upload_node(bucket, access_key, secret_key, version):
with scoped_cwd(DIST_DIR):
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/dist/{0}'.format(version), glob.glob('node-*.tar.gz'))
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/dist/{0}'.format(version), glob.glob('iojs-*.tar.gz'))
if PLATFORM == 'win32':
if get_target_arch() == 'ia32':
node_lib = os.path.join(DIST_DIR, 'node.lib')
iojs_lib = os.path.join(DIST_DIR, 'win-x86', 'iojs.lib')
else:
node_lib = os.path.join(DIST_DIR, 'x64', 'node.lib')
iojs_lib = os.path.join(DIST_DIR, 'win-x64', 'iojs.lib')
safe_mkdir(os.path.dirname(node_lib))
safe_mkdir(os.path.dirname(iojs_lib))
# Copy atom.lib to node.lib and iojs.lib.
atom_lib = os.path.join(OUT_DIR, 'node.dll.lib')
shutil.copy2(atom_lib, node_lib)
shutil.copy2(atom_lib, iojs_lib)
# Upload the node.lib.
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/dist/{0}'.format(version), [node_lib])
# Upload the iojs.lib.
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/dist/{0}'.format(version), [iojs_lib])
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "1anakin20/porcupine",
"score": 2
} |
#### File: porcupine/porcupine/utils.py
```python
from __future__ import annotations
import codecs
import collections
import contextlib
import dataclasses
import functools
import json
import logging
import re
import shlex
import shutil
import subprocess
import sys
import threading
import tkinter
import traceback
from pathlib import Path
from tkinter import ttk
from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar
from urllib.request import url2pathname
import dacite
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
import porcupine
log = logging.getLogger(__name__)
_T = TypeVar("_T")
# nsis install puts Porcupine.exe and python.exe in same place
if sys.platform == "win32" and sys.executable.endswith((r"\Porcupine.exe", r"\pythonw.exe")):
running_pythonw = True
python_executable = Path(sys.executable).parent / "python.exe"
else:
running_pythonw = False
python_executable = Path(sys.executable)
if sys.platform == "win32":
# this is mostly copy/pasted from subprocess.list2cmdline
def quote(string: str) -> str:
result = []
needquote = False
bs_buf = []
needquote = (" " in string) or ("\t" in string) or not string
if needquote:
result.append('"')
for c in string:
if c == "\\":
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append("\\" * len(bs_buf) * 2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return "".join(result)
else:
quote = shlex.quote
# https://github.com/python/typing/issues/769
def copy_type(f: _T) -> Callable[[Any], _T]:
"""A decorator to tell mypy that one function or method has the same type as another.
Example::
from typing import Any
from porcupine.utils import copy_type
def foo(x: int) -> None:
print(x)
@copy_type(foo)
def bar(*args: Any, **kwargs: Any) -> Any:
foo(*args, **kwargs)
bar(1) # ok
bar("lol") # mypy error
"""
return lambda x: x
# TODO: document this?
def format_command(command: str, substitutions: dict[str, Any]) -> list[str]:
parts = shlex.split(command, posix=(sys.platform != "win32"))
return [part.format_map(substitutions) for part in parts]
# There doesn't seem to be standard library trick that works in all cases
# https://stackoverflow.com/q/5977576
#
# TODO: document this?
def file_url_to_path(file_url: str) -> Path:
assert file_url.startswith("file://")
if sys.platform == "win32":
if file_url.startswith("file:///"):
# File on this computer: 'file:///C:/Users/Akuli/Foo%20Bar.txt'
return Path(url2pathname(file_url[8:]))
else:
# Network share: 'file://Server2/Share/Test/Foo%20Bar.txt'
return Path(url2pathname(file_url[5:]))
else:
# 'file:///home/akuli/foo%20bar.txt'
return Path(url2pathname(file_url[7:]))
# Using these with subprocess prevents opening unnecessary cmd windows
# TODO: document this
subprocess_kwargs: dict[str, Any] = {}
if sys.platform == "win32":
# https://stackoverflow.com/a/1813893
subprocess_kwargs["startupinfo"] = subprocess.STARTUPINFO(
dwFlags=subprocess.STARTF_USESHOWWINDOW
)
_LIKELY_PROJECT_ROOT_THINGS = [".editorconfig"] + [
readme + extension
for readme in ["README", "readme", "Readme", "ReadMe"]
for extension in ["", ".txt", ".md", ".rst"]
]
def find_project_root(project_file_path: Path) -> Path:
"""Given an absolute path to a file, figure out what project it belongs to.
The concept of a project is explained
`in Porcupine wiki <https://github.com/Akuli/porcupine/wiki/Working-with-projects>`_.
Currently, the logic for finding the project root is:
1. If the file is inside a Git repository, then the Git repository becomes
the project root. For example, the file I'm currently editing is
``/home/akuli/porcu/porcupine/utils.py``, and Porcupine has detected
``/home/akuli/porcu`` as its project because I use Git to develop Porcupine.
2. If Git isn't used but there is a readme file or an ``.editorconfig`` file,
then the project root is the folder containing the readme or the ``.editorconfig`` file.
(Porcupine supports editorconfig files.
You can read more about them at `editorconfig.org <https://editorconfig.org/>`_.)
So, even if Porcupine didn't use Git, it would still recognize the
project correctly, because there is ``/home/akuli/porcu/README.md``.
Porcupine recognizes several different capitalizations and file extensions,
such as ``README.md``, ``ReadMe.txt`` and ``readme.rst`` for example.
3. If all else fails, the directory containing the file is used.
"""
assert project_file_path.is_absolute()
likely_root = None
for path in project_file_path.parents:
if (path / ".git").exists():
return path # trust this the most, if it exists
elif likely_root is None and any(
(path / thing).exists() for thing in _LIKELY_PROJECT_ROOT_THINGS
):
likely_root = path
return likely_root or project_file_path.parent
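# Usage sketch (not part of the original file; the path is illustrative):
#     find_project_root(Path('/home/akuli/porcu/porcupine/utils.py'))
#     # -> Path('/home/akuli/porcu') when that folder holds a .git directory or a readme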
class PanedWindow(tkinter.PanedWindow):
"""Like :class:`tkinter.PanedWindow`, but uses Ttk colors.
Do not waste your time with ``ttk.Panedwindow``. It lacks options to
control the sizes of the panes.
"""
@copy_type(tkinter.PanedWindow.__init__)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# even non-ttk widgets can handle <<ThemeChanged>>
self.bind("<<ThemeChanged>>", self._update_colors, add=True)
self._update_colors()
def _update_colors(self, junk_event: object = None) -> None:
ttk_bg = self.tk.eval("ttk::style lookup TLabel.label -background")
assert ttk_bg
self["bg"] = ttk_bg
# TODO: document this?
def is_bright(color: str) -> bool:
widget = porcupine.get_main_window() # any widget would do
return sum(widget.winfo_rgb(color)) / 3 > 0x7FFF
# i know, i shouldn't do math with rgb colors, but this is good enough
def invert_color(color: str, *, black_or_white: bool = False) -> str:
"""Return a color with opposite red, green and blue values.
Example: ``invert_color('white')`` is ``'#000000'`` (black).
This function uses tkinter for converting the color to RGB. That's
why a tkinter root window must have been created, but *color* can be
any Tk-compatible color string, like a color name or a ``'#rrggbb'``
string. The return value is always a ``'#rrggbb`` string (also compatible
with Tk).
If ``black_or_white=True`` is set, then the result is always ``"#000000"``
(black) or ``"#ffffff"`` (white), depending on whether the color is bright
or dark.
"""
if black_or_white:
return "#000000" if is_bright(color) else "#ffffff"
widget = porcupine.get_main_window() # any widget would do
# tkinter uses 16-bit colors, convert them to 8-bit
r, g, b = (value >> 8 for value in widget.winfo_rgb(color))
return "#%02x%02x%02x" % (0xFF - r, 0xFF - g, 0xFF - b)
def mix_colors(color1: str, color2: str, color1_amount: float) -> str:
"""Create a new color based on two existing colors.
The ``color1_amount`` should be a number between 0 and 1, specifying how
much ``color1`` to use. If you set it to 0.8, for example, then the
resulting color will be 80% ``color1`` and 20% ``color2``.
Colors are specified and returned similarly to :func:`invert_color`.
"""
color2_amount = 1 - color1_amount
widget = porcupine.get_main_window()
r, g, b = (
round(color1_amount * value1 + color2_amount * value2)
for value1, value2 in zip(widget.winfo_rgb(color1), widget.winfo_rgb(color2))
)
return "#%02x%02x%02x" % (r >> 8, g >> 8, b >> 8) # convert back to 8-bit
# This doesn't handle all possible cases, see bind(3tk)
def _format_binding(binding: str, menu: bool) -> str:
mac = porcupine.get_main_window().tk.eval("tk windowingsystem") == "aqua"
parts = binding.lstrip("<").rstrip(">").split("-")
# don't know how to show click in mac menus
if mac and menu and any(parts[i : i + 2] == "Button-1".split("-") for i in range(len(parts))):
return ""
# Must recompute length on every iteration, because length changes
i = 0
while i < len(parts):
if parts[i : i + 3] == ["Double", "Button", "1"]:
parts[i : i + 3] = ["double-click"]
elif parts[i : i + 2] == ["Button", "1"]:
parts[i : i + 2] = ["click"]
elif re.fullmatch(r"[a-z]", parts[i]):
parts[i] = parts[i].upper()
elif re.fullmatch(r"[A-Z]", parts[i]):
parts.insert(i, "Shift")
# Increment beyond the added "Shift" and letter
i += 2
continue
i += 1
if "Key" in parts:
parts.remove("Key")
if mac:
# event_info() returns <Mod1-Key-x> for <Command-x>
parts = [{"Mod1": "Command", "plus": "+", "minus": "-"}.get(part, part) for part in parts]
if mac:
# <ThePhilgrim> I think it's like from left to right... so it would be shift -> ctrl -> alt -> cmd
sort_order = {"Shift": 1, "Control": 2, "Alt": 3, "Command": 4}
symbol_mapping = {
"Shift": "⇧",
"Control": "⌃", # NOT same as ascii "^"
"Alt": "⌥",
"Command": "⌘",
"Return": "⏎",
}
else:
sort_order = {"Control": 1, "Alt": 2, "Shift": 3}
symbol_mapping = {
"Control": "Ctrl",
"0": "Zero", # not needed on mac, its font distinguishes 0 and O well
"plus": "Plus",
"minus": "Minus",
"Return": "Enter",
}
parts.sort(key=(lambda part: sort_order.get(part, 100)))
if mac and menu:
# Tk will use the proper symbols automagically, and it expects dash-separated
# Even "Command--" for command and minus key works
return "-".join(parts)
parts = [symbol_mapping.get(part, part) for part in parts]
if mac:
# e.g. "⌘-double-click"
# But not like this: ["double-click"] --> ["-double-click"]
parts[1:] = [
{"click": "-click", "double-click": "-double-click"}.get(part, part)
for part in parts[1:]
]
return ("" if mac else "+").join(parts)
# TODO: document this
def get_binding(virtual_event: str, *, menu: bool = False) -> str:
bindings = porcupine.get_main_window().event_info(virtual_event)
if not bindings and not menu:
log.warning(f"no bindings configured for {virtual_event}")
return _format_binding(bindings[0], menu) if bindings else ""
# TODO: document this
def tkinter_safe_string(string: str, *, hide_unsupported_chars: bool = False) -> str:
if hide_unsupported_chars:
replace_with = ""
else:
replace_with = "\N{replacement character}"
return "".join(replace_with if ord(char) > 0xFFFF else char for char in string)
class EventDataclass:
"""
Inherit from this class when creating a dataclass for
:func:`bind_with_data`.
All values should be JSON safe or data classes containing JSON safe values.
Nested dataclasses don't need to inherit from EventDataclass. Example::
import dataclasses
from typing import List
from porcupine import utils
@dataclasses.dataclass
class Foo:
message: str
num: int
@dataclasses.dataclass
class Bar(utils.EventDataclass):
foos: List[Foo]
def handle_event(event: utils.EventWithData) -> None:
print(event.data_class(Bar).foos[0].message)
utils.bind_with_data(some_widget, '<<Thingy>>', handle_event, add=True)
...
foos = [Foo('ab', 123), Foo('cd', 456)]
some_widget.event_generate('<<Thingy>>', data=Bar(foos))
Note that before Python 3.10, you need ``List[str]`` instead of
``list[str]``, even if you use ``from __future__ import annotations``. This
is because Porcupine uses a library that needs to evaluate the type
annotations even if ``from __future__ import annotations``
was used.
"""
def __str__(self) -> str:
# str(Foo(a=1, b=2)) --> 'Foo{"a": 1, "b": 2}'
# Content after Foo is JSON parsed in Event.data_class()
return type(self).__name__ + json.dumps(dataclasses.asdict(self))
if TYPE_CHECKING:
_Event = tkinter.Event[tkinter.Misc]
else:
_Event = tkinter.Event
class EventWithData(_Event):
"""A subclass of :class:`tkinter.Event[tkinter.Misc]` for use with :func:`bind_with_data`."""
#: If a string was passed to the ``data`` argument of ``event_generate()``,
#: then this is that string.
data_string: str
def data_class(self, T: Type[_T]) -> _T:
"""
If a dataclass instance of type ``T`` was passed as ``data`` to
``event_generate()``, then this returns a copy of it. Otherwise this
raises an error.
``T`` must be a dataclass that inherits from :class:`EventDataclass`.
"""
assert self.data_string.startswith(T.__name__ + "{")
result = dacite.from_dict(T, json.loads(self.data_string[len(T.__name__) :]))
assert isinstance(result, T)
return result
def __repr__(self) -> str:
match = re.fullmatch(r"<(.*)>", super().__repr__())
assert match is not None
return f"<{match.group(1)} data_string={self.data_string!r}>"
def bind_with_data(
widget: tkinter.Misc,
sequence: str,
callback: Callable[[EventWithData], str | None],
add: bool = False,
) -> str:
"""
Like ``widget.bind(sequence, callback)``, but supports the ``data``
argument of ``event_generate()``. Note that the callback takes an argument
of type :class:`EventWithData` rather than a usual ``tkinter.Event[tkinter.Misc]``.
Here's an example::
from porcupine import utils
def handle_event(event: utils.EventWithData):
print(event.data_string)
utils.bind_with_data(some_widget, '<<Thingy>>', handle_event, add=True)
# this prints 'wut wut'
some_widget.event_generate('<<Thingy>>', data='wut wut')
Note that everything is a string in Tcl, so tkinter ``str()``'s the data.
"""
# tkinter creates event objects normally and appends them to the
# deque, then run_callback() adds data_blablabla attributes to the
# event objects and runs callback(event)
#
# TODO: is it possible to do this without a deque?
event_objects: collections.deque[tkinter.Event[tkinter.Misc]] = collections.deque()
widget.bind(sequence, event_objects.append, add=add)
def run_the_callback(data_string: str) -> str | None:
event: tkinter.Event[tkinter.Misc] | EventWithData = event_objects.popleft()
event.__class__ = EventWithData # evil haxor muhaha
assert isinstance(event, EventWithData)
event.data_string = data_string
return callback(event) # may return 'break'
# tkinter's bind() ignores the add argument when the callback is a string :(
funcname = widget.register(run_the_callback)
widget.tk.call("bind", widget, sequence, '+ if {"[%s %%d]" == "break"} break' % funcname)
return funcname
def add_scroll_command(
widget: tkinter.Text,
option: Literal["xscrollcommand", "yscrollcommand"],
callback: Callable[[], None],
) -> None:
"""Schedule ``callback`` to run with no arguments when ``widget`` is scrolled.
The option should be ``'xscrollcommand'`` for horizontal scrolling or
``'yscrollcommand'`` for vertical scrolling.
Unlike when setting the option directly, this function can be called
multiple times with the same widget and the same option to set multiple
callbacks.
"""
if not widget[option]:
widget[option] = lambda *args: None
tcl_code = widget[option]
assert isinstance(tcl_code, str)
assert tcl_code
# from options(3tk): "... the widget will generate a Tcl command by
# concatenating the scroll command and two numbers."
#
# So if tcl_code is like this: bla bla bla
#
# it would be called like this: bla bla bla 0.123 0.456
#
# and by putting something in front on separate line we can make it get called like this
#
# something
# bla bla bla 0.123 0.456
widget[option] = widget.register(callback) + "\n" + tcl_code
# this is not bind_tab to avoid confusing with tabs.py, as in browser tabs
def bind_tab_key(
widget: tkinter.Widget, on_tab: Callable[["tkinter.Event[Any]", bool], Any], **bind_kwargs: Any
) -> None:
"""A convenience function for binding Tab and Shift+Tab.
Use this function like this::
def on_tab(event, shifted):
# shifted is True if the user held down shift while pressing
# tab, and False otherwise
...
utils.bind_tab_key(some_widget, on_tab, add=True)
The ``event`` argument and ``on_tab()`` return values are treated
just like with regular bindings.
Binding ``'<Tab>'`` works just fine everywhere, but binding
``'<Shift-Tab>'`` only works on Windows and Mac OSX. This function
also works on X11.
"""
# there's something for this in more_functools, but it's a big
# dependency for something this simple imo
def callback(shifted: bool, event: tkinter.Event[tkinter.Misc]) -> Any:
return on_tab(event, shifted)
if widget.tk.call("tk", "windowingsystem") == "x11":
# even though the event keysym says Left, holding down the right
# shift and pressing tab also works :D
shift_tab = "<ISO_Left_Tab>"
else:
shift_tab = "<Shift-Tab>"
widget.bind("<Tab>", functools.partial(callback, False), **bind_kwargs) # bindcheck: ignore
widget.bind(shift_tab, functools.partial(callback, True), **bind_kwargs) # bindcheck: ignore
# list of encodings supported by python 3.7 https://stackoverflow.com/a/25584253
_list_of_encodings = [
"ascii",
"big5",
"big5hkscs",
"cp037",
"cp273",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"cp1006",
"cp1026",
"cp1125",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp65001",
"euc-jis-2004",
"euc-jisx0213",
"euc-jp",
"euc-kr",
"gb2312",
"gb18030",
"gbk",
"hz",
"iso2022-jp",
"iso2022-jp-1",
"iso2022-jp-2",
"iso2022-jp-3",
"iso2022-jp-2004",
"iso2022-jp-ext",
"iso2022-kr",
"iso8859-2",
"iso8859-3",
"iso8859-4",
"iso8859-5",
"iso8859-6",
"iso8859-7",
"iso8859-8",
"iso8859-9",
"iso8859-10",
"iso8859-11",
"iso8859-13",
"iso8859-14",
"iso8859-15",
"iso8859-16",
"johab",
"koi8-r",
"koi8-t",
"koi8-u",
"kz1048",
"latin-1",
"mac-cyrillic",
"mac-greek",
"mac-iceland",
"mac-latin2",
"mac-roman",
"mac-turkish",
"ptcp154",
"shift-jis",
"shift-jis-2004",
"shift-jisx0213",
"utf-7",
"utf-8",
"utf-8-sig",
"utf-16",
"utf-16-be",
"utf-16-le",
"utf-32",
"utf-32-be",
"utf-32-le",
]
# TODO: document this?
def ask_encoding(text: str, old_encoding: str) -> str | None:
label_width = 400
dialog = tkinter.Toplevel()
if porcupine.get_main_window().winfo_viewable():
dialog.transient(porcupine.get_main_window())
dialog.resizable(False, False)
dialog.title("Choose an encoding")
big_frame = ttk.Frame(dialog)
big_frame.pack(fill="both", expand=True)
ttk.Label(big_frame, text=text, wraplength=label_width).pack(fill="x", padx=10, pady=10)
var = tkinter.StringVar()
combobox = ttk.Combobox(big_frame, values=_list_of_encodings, textvariable=var)
combobox.pack(pady=40)
combobox.set(old_encoding)
ttk.Label(
big_frame,
text=(
"You can create a project-specific .editorconfig file to change the encoding"
" permanently."
),
wraplength=label_width,
).pack(fill="x", padx=10, pady=10)
button_frame = ttk.Frame(big_frame)
button_frame.pack(fill="x", pady=10)
selected_encoding = None
def select_encoding() -> None:
nonlocal selected_encoding
selected_encoding = combobox.get()
dialog.destroy()
cancel_button = ttk.Button(button_frame, text="Cancel", command=dialog.destroy, width=1)
cancel_button.pack(side="left", expand=True, fill="x", padx=10)
ok_button = ttk.Button(button_frame, text="OK", command=select_encoding, width=1)
ok_button.pack(side="right", expand=True, fill="x", padx=10)
def validate_encoding(*junk: object) -> None:
encoding = combobox.get()
try:
codecs.lookup(encoding)
except LookupError:
ok_button.config(state="disabled")
else:
ok_button.config(state="normal")
var.trace_add("write", validate_encoding)
combobox.bind("<Return>", (lambda event: ok_button.invoke()), add=True)
combobox.bind("<Escape>", (lambda event: cancel_button.invoke()), add=True)
combobox.select_range(0, "end")
combobox.focus()
dialog.wait_window()
return selected_encoding
def run_in_thread(
blocking_function: Callable[[], _T],
done_callback: Callable[[bool, str | _T], None],
*,
check_interval_ms: int = 100,
daemon: bool = True,
) -> None:
"""Run ``blocking_function()`` in another thread.
If the *blocking_function* raises an error,
``done_callback(False, traceback)`` will be called where *traceback*
is the error message as a string. If no errors are raised,
``done_callback(True, result)`` will be called where *result* is the
return value from *blocking_function*. The *done_callback* is always
called from Tk's main loop, so it can do things with Tkinter widgets
unlike *blocking_function*.
Internally, this function checks whether the thread has completed every
100 milliseconds by default (so 10 times per second). Specify
*check_interval_ms* to customize this.
Unlike :class:`threading.Thread`, this function uses a daemon thread by
default. This means that the thread will end forcefully when Porcupine
exits, and it might not get a chance to finish whatever it is doing. Pass
``daemon=False`` to change this.
"""
root = porcupine.get_main_window() # any widget would do
value: _T
error_traceback: str | None = None
def thread_target() -> None:
nonlocal value
nonlocal error_traceback
try:
value = blocking_function()
except Exception:
error_traceback = traceback.format_exc()
def check() -> None:
if thread.is_alive():
# let's come back and check again later
root.after(check_interval_ms, check)
else:
if error_traceback is None:
done_callback(True, value)
else:
done_callback(False, error_traceback)
thread = threading.Thread(target=thread_target, daemon=daemon)
thread.start()
root.after_idle(check)
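# Usage sketch (not part of the original file; both callbacks are hypothetical):
#     def fetch():                        # runs in a worker thread, must not touch tkinter
#         return expensive_download()
#     def done(success, result):          # runs in Tk's main loop
#         print('ok' if success else 'failed', result)
#     run_in_thread(fetch, done)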
@copy_type(open)
@contextlib.contextmanager
def backup_open(file: Any, *args: Any, **kwargs: Any) -> Any:
"""Like :func:`open`, but uses a backup file if needed.
This is useless with modes like ``'r'`` because they don't modify
the file, but this is useful when overwriting the user's files.
This needs to be used as a context manager. For example::
try:
with utils.backup_open(cool_file, 'w') as file:
...
except (UnicodeError, OSError):
# log the error and report it to the user
This automatically restores from the backup on failure.
"""
path = Path(file)
if path.exists():
# there's something to back up
#
# for backing up foo.py:
# if foo-backup.py, then use foo-backup-backup.py etc
backuppath = path
while backuppath.exists():
backuppath = backuppath.with_name(backuppath.stem + "-backup" + backuppath.suffix)
log.info(f"backing up '{path}' to '{backuppath}'")
shutil.copy(path, backuppath)
try:
yield path.open(*args, **kwargs)
except Exception as e:
log.info(f"restoring '{path}' from the backup")
shutil.move(str(backuppath), str(path))
raise e
else:
log.info(f"deleting '{backuppath}'")
backuppath.unlink()
else:
yield path.open(*args, **kwargs)
``` |
{
"source": "1and1/confluencer",
"score": 2
} |
#### File: confluencer/api/__init__.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import sys
import json
import base64
import struct
import logging
import collections
from contextlib import contextmanager
import requests
import requests_cache
from addict import Dict as AttrDict
from rudiments.reamed import click
from .. import config
from .. import __version__ as version
from .._compat import text_type, urlparse, urlunparse, parse_qs, urlencode, unquote_plus
# Exceptions that API calls typically emit
ERRORS = (
requests.RequestException,
)
MAX_ERROR_LINES = 15
def page_id_from_tiny_link(uri, _re=re.compile(r'/x/([-_A-Za-z0-9]+)')):
""" Extract the page ID from a so-called *tiny link*.
See `this answer <https://answers.atlassian.com/questions/87971/what-is-the-algorithm-used-to-create-the-tiny-links>`
for details.
"""
matched = _re.search(uri)
if matched:
tiny_url_id = matched.group(1)
if isinstance(tiny_url_id, text_type):
tiny_url_id = tiny_url_id.encode('ascii')
#tiny_url_id += b'=' * (len(tiny_url_id) % 4)
page_id_bytes = (base64.b64decode(tiny_url_id, altchars=b'_-') + b'\0\0\0\0')[:4]
return struct.unpack('<L', page_id_bytes)[0]
else:
raise ValueError("Not a tiny link: {}".format(uri))
def tiny_id(page_id):
"""Return *tiny link* ID for the given page ID."""
return base64.b64encode(struct.pack('<L', int(page_id)).rstrip(b'\0'), altchars=b'_-').rstrip(b'=').decode('ascii')
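# Round-trip sketch (not part of the original file; the ID is illustrative and the values
# follow only from the pack/base64 logic above):
#     tiny_id(123456)                      # -> 'QOIB'
#     page_id_from_tiny_link('/x/QOIB')    # -> 123456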
def diagnostics(cause):
"""Display diagnostic info based on the given cause."""
import pprint
if not cause:
return
response = getattr(cause, 'response', None)
request = getattr(response, 'request', None)
# pprint.pprint(vars(response))
# pprint.pprint(vars(request))
method = 'HTTP {}'.format(request.method) if request else 'HTTP'
try:
data = pprint.pformat(response.json(), indent=4)
except (AttributeError, TypeError, ValueError):
try:
data = response.content
except AttributeError:
data = ''
if data:
try:
data = data.decode('ascii')
except (AttributeError, UnicodeDecodeError):
pass
data = data.splitlines()
if len(data) > MAX_ERROR_LINES:
data = data[:MAX_ERROR_LINES] + ['...']
data = '| RESPONSE BODY:\n' + '\n'.join(['| ' + x for x in data])
click.serror("{} ERROR: {}".format(method, cause))
if data:
click.secho(data)
@contextmanager
def context(*args, **kwargs):
"""Context manager providing an API object with standard error logging."""
api = ConfluenceAPI(*args, **kwargs)
try:
yield api
except ERRORS as cause:
api.log.error("API ERROR: %s", cause)
raise
class ConfluenceAPI(object):
""" Support for using the Confluence API.
Since the Confluence API has excellent support for discovery by
e.g. the ``_links`` attribute in results, this just adds a thin
convenience layer above plain ``requests`` HTTP calls.
"""
CACHE_EXPIRATION = 10 * 60 * 60 # seconds
UA_NAME = 'Confluencer'
def __init__(self, endpoint=None, session=None):
self.log = logging.getLogger('cfapi')
self.base_url = endpoint or os.environ.get('CONFLUENCE_BASE_URL')
assert self.base_url, "You MUST set the CONFLUENCE_BASE_URL environment variable!"
self.base_url = self.base_url.rstrip('/')
# Enable HTTP logging when 'requests' logger is on DEBUG level
if logging.getLogger("requests").getEffectiveLevel() <= logging.DEBUG:
try:
import http.client as http_client
except ImportError: # Python 2
import httplib as http_client # pylint: disable=import-error
http_client.HTTPConnection.debuglevel = 1
self.session = session or requests.Session()
self.session.headers['User-Agent'] = '{}/{} [{}]'.format(
self.UA_NAME, version, requests.utils.default_user_agent())
self.cached_session = requests_cache.CachedSession(
cache_name=config.cache_file(type(self).__name__),
expire_after=self.CACHE_EXPIRATION)
self.cached_session.headers['User-Agent'] = self.session.headers['User-Agent']
def url(self, path):
""" Build an API URL from partial paths.
Parameters:
path (str): Page URL / URI in various formats (tiny, title, id).
Returns:
str: The fully qualified API URL for the page.
Raises:
ValueError: A ``path`` was passed that isn't understood, or malformed.
"""
url = path
# Fully qualify partial URLs
if not url.startswith('/rest/api/') and '://' not in url:
url = '/rest/api/' + url.lstrip('/')
if not url.startswith('http'):
url = self.base_url + url
if '/rest/api/' not in url:
# Parse and rewrite URLs of the following forms:
# https://confluence.example.com/pages/viewpage.action?pageId=#######
# https://confluence.example.com/display/SPACEKEY/Page+Title
# https://confluence.example.com/x/TTTTT
scheme, netloc, url_path, params, query, fragment = urlparse(url)
query = parse_qs(query or '')
#print((scheme, netloc, url_path, params, query, fragment))
if url_path.endswith('/pages/viewpage.action'):
# Page link with ID
page_id = int(query.pop('pageId', [0])[0])
if page_id:
url_path = '{}/rest/api/content/{}'.format(url_path.split('/pages/')[0], page_id)
else:
raise ValueError("Missing 'pageId' in malformed URL '{}'".format(path))
elif 'display' in url_path.lstrip('/').split('/')[:2]:
# Page link with title
matched = re.search(r'/display/([^/]+)/([^/]+)', url_path)
if matched:
url_path = '{}/rest/api/content/search'.format(url_path.split('/display/')[0])
title = unquote_plus(matched.group(2))
search_query = dict(
# CF 3.5.x ignores cqlcontext?
cql='title="{}" AND space="{}"'.format(
title.replace('"', '?'), matched.group(1)
),
cqlcontext=json.dumps(dict(spaceKey=matched.group(1))),
)
search_url = urlunparse((scheme, netloc, url_path, params, urlencode(search_query), fragment))
found = self.get(search_url)
if found.size == 1:
url_path, url = None, found.results[0]._links.self
else:
raise ValueError("{} results while searching for page with URL '{}'{}, query was:\n{}"
.format('Multiple' if found.size else 'No',
path,
'' if found.size else ' (maybe indexing is lagging)',
search_url))
else:
raise ValueError("Missing '.../display/SPACE/TITLE' in malformed URL '{}'".format(path))
elif 'x' in url_path.lstrip('/').split('/')[:2]:
# Tiny link
page_id = page_id_from_tiny_link(url_path)
url_path = '{}/rest/api/content/{}'.format(url_path.split('/x/')[0], page_id)
else:
raise ValueError("Cannot create API endpoint from malformed URL '{}'".format(path))
if url_path:
url = urlunparse((scheme, netloc, url_path, params, urlencode(query), fragment))
return url
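# Usage sketch (not part of the original file; host and tiny link are illustrative):
#     api.url('https://confluence.example.com/x/QOIB')
#     # -> 'https://confluence.example.com/rest/api/content/123456'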
def get(self, path, **params):
""" GET an API path and return result.
If ``_cached=True`` is provided, the cached session is used.
"""
params = params.copy()
cached = params.pop('_cached', False)
url = self.url(path)
self.log.debug("GET from %r", url)
response = (self.cached_session if cached else self.session).get(url, params=params)
response.raise_for_status()
result = AttrDict(response.json())
result._info.server = response.headers.get('Server', '')
result._info.sen = response.headers.get('X-ASEN', '')
return result
def getall(self, path, **params):
""" Yield all results of a paginated GET.
If the ``limit`` keyword argument is set, it is used to stop the
generator after the given number of result items.
:param path: Confluence API URI.
:param params: Request parameters.
"""
params = params.copy()
pos, outer_limit = 0, params.pop('limit', sys.maxsize)
while path:
response = self.get(path, **params)
#import pprint; print('\nGETALL RESPONSE'); pprint.pprint(response); print('')
if 'page' in params.get('expand', '').split(','):
response = response['page']
items = response.get('results', [])
for item in items:
pos += 1
if pos > outer_limit:
return
yield item
path = response.get('_links', {}).get('next', None)
params.clear()
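# Usage sketch (not part of the original file; the CQL query and limit are illustrative):
#     for page in api.getall('content/search', cql='type=page AND space=DEV', limit=50):
#         print(page['title'])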
def add_page(self, space_key, title, body, parent_id=None, labels=None):
""" Create a new page.
The body must be in 'storage' representation.
"""
data = {
"type": "page",
"title": title,
"space": {
"key": space_key,
},
"body": {
"storage": {
"value": body,
"representation": "storage",
}
}
}
if parent_id:
data.update(dict(ancestors=[dict(type='page', id=parent_id)]))
url = self.url('/content')
self.log.debug("POST (add page) to %r", url)
response = self.session.post(url, json=data)
response.raise_for_status()
page = AttrDict(response.json())
self.log.debug("Create '%s': %r", title, response)
# Add any provided labels
if labels:
data = [dict(prefix='global', name=label) for label in labels]
response = self.session.post(page._links.self + '/label', json=data)
response.raise_for_status()
self.log.debug("Labels for #'%s': %r %r",
page.id, response, [i['name'] for i in response.json()['results']])
return page
def update_page(self, page, body, minor_edit=True):
""" Update an existing page.
The page **MUST** have been retrieved using ``expand='body.storage,version,ancestors'``.
"""
if page.body.storage.value == body:
self.log.debug("Update: Unchanged page '%s', doing nothing", page.title)
else:
data = {
"id": page.id,
"type": page.type,
"title": page.title,
"space": {
"key": page._expandable.space.split('/')[-1],
},
"body": {
"storage": {
"value": body,
"representation": "storage",
}
},
"version": {"number": page.version.number + 1, "minorEdit": minor_edit},
"ancestors": [{'type': page.ancestors[-1].type, 'id': page.ancestors[-1].id}],
}
url = self.url('/content/{}'.format(page.id))
self.log.debug("PUT (update page) to %r", url)
#import pprint; print('\nPAGE UPDATE'); pprint.pprint(data); print('')
response = self.session.put(url, json=data)
response.raise_for_status()
page = AttrDict(response.json())
self.log.debug("Create '%s': %r", page.title, response)
return page
def delete_page(self, page, status=None):
""" Delete an existing page.
To permanently purge trashed content, pass ``status='trashed'``.
"""
url = self.url('/content/{}'.format(page.id))
self.log.debug("DELETE %r (status=%r)", url, status)
data = {}
if status:
data['status'] = status
response = self.session.delete(url, json=data)
response.raise_for_status()
def user(self, username=None, key=None):
""" Return user details.
Passing neither user name nor key retrieves the current user.
"""
if key:
user = self.get('user', key=key, _cached=True)
elif username:
user = self.get('user', username=username, _cached=True)
else:
user = self.get('user/current')
return user
def walk(self, path, **params):
""" Walk a page tree recursively, and yield the root and all its children.
"""
params = params.copy()
depth_1st = params.pop('depth_1st', False)
root_url = self.url(path)
self.log.debug("Walking %r %s", root_url, 'depth 1st' if depth_1st else 'breadth 1st')
stack = collections.deque([(0, [self.get(root_url, **params)])])
while stack:
depth, pages = stack.pop()
for page in pages:
##import pprint; print('~ {:3d} {} '.format(depth, page.title).ljust(78, '~')); pprint.pprint(dict(page))
yield depth, page
children = self.getall(page._links.self + '/child/page', **params)
if depth_1st:
for child in children:
stack.append((depth+1, [child]))
else:
stack.appendleft((depth+1, children))
```
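The `getall` generator above pages through results by following the `_links.next` cursor, and `walk` builds on it to traverse a page tree breadth- or depth-first. A minimal usage sketch — assuming the wrapper class shown above is already instantiated as `cf` (the instance name, space key and page URL below are placeholders) — could look like this:
```python
# Sketch only: `cf` is assumed to be an instance of the API wrapper defined above.

# `getall` follows the `_links.next` cursor; `limit` caps the number of yielded items.
for page in cf.getall('/content', spaceKey='DEMO', type='page', limit=50):
    print(page['id'], page['title'])

# `walk` yields (depth, page) tuples for the root page and all of its descendants.
for depth, page in cf.walk('https://confluence.example.com/display/DEMO/Start+Here'):
    print('  ' * depth + page.title)
```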
#### File: confluencer/commands/pretty.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import sys
import json as jsonlib
from rudiments.reamed import click
from .. import config, api
from ..tools import content
@config.cli.command()
@click.option('-R', '--recursive', is_flag=True, default=False, help='Handle all descendants.')
@click.option('-J', '--json', is_flag=True, default=False, help='Print raw API response (JSON).')
@click.option('-f', '--format', 'markup', default='view', type=click.Choice(content.CLI_CONTENT_FORMATS.keys()),
help="Markup format.",
)
@click.argument('pages', metavar='‹page-url›…', nargs=-1)
@click.pass_context
def pretty(ctx, pages, markup, recursive=False, json=False):
"""Pretty-print page content markup."""
content_format = content.CLI_CONTENT_FORMATS[markup]
with api.context() as cf:
for page_url in pages:
try:
page = content.ConfluencePage(cf, page_url, markup=content_format,
expand='metadata.labels,metadata.properties')
except api.ERRORS as cause:
# Just log and otherwise ignore any errors
api.diagnostics(cause)
else:
if json:
jsonlib.dump(page.json, sys.stdout, indent=' ', sort_keys=True)
else:
root = page.etree()
with os.fdopen(sys.stdout.fileno(), "wb", closefd=False) as stdout:
root.getroottree().write(stdout, encoding='utf8', pretty_print=True, xml_declaration=False)
```
#### File: src/tests/test_tools_content.py
```python
from __future__ import absolute_import, unicode_literals, print_function
from munch import Munch as Bunch
from confluencer.tools import content
class APIMock(object):
def get(self, _, **_dummy):
return Bunch(body={'storage': Bunch(value='foo')})
def test_page_object_creation():
page = content.ConfluencePage(APIMock(), '/SOME/URL')
assert page.body == 'foo'
``` |
{
"source": "1and1/dim",
"score": 3
} |
#### File: dim/dim/ldap_auth.py
```python
import logging
import ldap3
from flask import current_app as app
def check_credentials(username, password):
user_dn = app.config['LDAP_USER_DN'] % username
if app.config['LDAP_SEARCH_BASE']:
user_dn += "," + app.config['LDAP_SEARCH_BASE']
server = ldap3.Server(app.config['LDAP_SERVER'])
    conn = ldap3.Connection(server, user=user_dn, password=password, client_strategy=ldap3.SAFE_SYNC)
    # With the SAFE_SYNC strategy every operation, including bind(), returns a
    # (status, result, response, request) tuple instead of a plain boolean.
    status, result, _, _ = conn.bind()
    if not status:
        logging.info('LDAP login for user %s failed: %s', username, result)
        return False
return True
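# Usage sketch (not part of dim itself): check_credentials() reads its LDAP settings
# from the Flask application config and therefore needs an application context.
# Hypothetical placeholder values:
#
#   app.config['LDAP_SERVER'] = 'ldaps://ldap.example.com'
#   app.config['LDAP_USER_DN'] = 'uid=%s'            # the username is interpolated here
#   app.config['LDAP_SEARCH_BASE'] = 'ou=people,dc=example,dc=com'
#
#   with app.app_context():
#       ok = check_credentials('jdoe', 'secret')     # True only if the bind succeeds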
``` |
{
"source": "1and1/yagocd",
"score": 2
} |
#### File: yagocd/tests/test_encryption_manager.py
```python
import pytest
from tests import AbstractTestManager, ReturnValueMixin
from yagocd.resources import encryption
@pytest.fixture()
def manager(session_fixture):
return encryption.EncryptionManager(session=session_fixture)
class TestEncrypt(AbstractTestManager, ReturnValueMixin):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("encrypt/encrypt") as cass:
return cass, manager.encrypt('S3cr37 m3ss4ge.')
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/encrypt'
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_return_type(self):
return str
@pytest.fixture()
def expected_return_value(self, gocd_docker):
return "1sZ4W1w7ajI1AAoNprA1mw=="
```
#### File: yagocd/tests/test_environment_manager.py
```python
from distutils.version import LooseVersion
import pytest
from mock import mock
from six import string_types
from tests import AbstractTestManager, ReturnValueMixin
from yagocd.resources import environment
@pytest.fixture()
def manager(session_fixture):
return environment.EnvironmentManager(session=session_fixture)
class BaseManager(AbstractTestManager):
@pytest.fixture()
def prepare_environment(self, manager, my_vcr):
with my_vcr.use_cassette("environment/prepare"):
manager.create(dict(name='foo'))
manager.create(dict(name='bar', pipelines=[dict(name='Automated_Tests')]))
class TestList(BaseManager, ReturnValueMixin):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_environment):
with my_vcr.use_cassette("environment/list") as cass:
return cass, manager.list()
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/environments'
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert all(isinstance(i, environment.EnvironmentConfig) for i in result)
return check_value
class TestGet(BaseManager, ReturnValueMixin):
NAME = 'bar'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_environment):
with my_vcr.use_cassette("environment/get_{}".format(self.NAME)) as cass:
return cass, manager.get(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/environments/{}'.format(self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_return_type(self):
return environment.EnvironmentConfig
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == self.NAME
return check_value
class TestCreate(BaseManager, ReturnValueMixin):
NAME = 'baz'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("environment/create_{}".format(self.NAME)) as cass:
return cass, manager.create(dict(name=self.NAME, pipelines=[dict(name='Shared_Services')]))
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/environments'
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_return_type(self):
return environment.EnvironmentConfig
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == self.NAME
assert result.data.pipelines[0].name == 'Shared_Services'
return check_value
class TestUpdate(BaseManager, ReturnValueMixin):
NAME = 'bar'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_environment):
with my_vcr.use_cassette("environment/prepare_update_{}".format(self.NAME)):
env = manager.get(self.NAME)
with my_vcr.use_cassette("environment/update_{}".format(self.NAME)) as cass:
env.data.pipelines.append(dict(name='Deploy_UAT'))
return cass, manager.update(name=self.NAME, config=dict(name='new_name'), etag=env.etag)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/environments/{}'.format(self.NAME)
@pytest.fixture()
def expected_request_method(self, manager):
if LooseVersion(manager._session.server_version) <= LooseVersion('16.9.0'):
return 'PATCH'
return 'PUT'
@pytest.fixture()
def expected_return_type(self):
return environment.EnvironmentConfig
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result.data.name == 'new_name'
return check_value
class TestDelete(BaseManager, ReturnValueMixin):
NAME = 'foo'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr, prepare_environment):
with my_vcr.use_cassette("environment/delete_{}".format(self.NAME)) as cass:
return cass, manager.delete(self.NAME)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/admin/environments/{}'.format(self.NAME)
@pytest.fixture()
def expected_request_method(self):
return 'DELETE'
@pytest.fixture()
def expected_return_type(self):
return string_types
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert result in [
"Environment '{}' was deleted successfully.".format(self.NAME),
"The environment '{}' was deleted successfully.".format(self.NAME),
]
return check_value
class TestMagicMethods(object):
@mock.patch('yagocd.resources.environment.EnvironmentManager.get')
def test_indexed_based_access(self, get_mock, manager):
name = mock.MagicMock()
_ = manager[name] # noqa
get_mock.assert_called_once_with(name=name)
@mock.patch('yagocd.resources.environment.EnvironmentManager.list')
def test_iterator_access(self, list_mock, manager):
for _ in manager:
pass
list_mock.assert_called_once_with()
```
#### File: yagocd/tests/test_exception.py
```python
import pytest
from mock.mock import MagicMock
from yagocd.exception import RequestError
class TestRequestError(object):
@pytest.mark.parametrize('summary, json, expected_str', [
('Some error', None, 'Some error'),
('Some error', '', 'Some error'),
('Some error', '{ non json', 'Some error'),
('Some error', dict(message='foobar is here'), 'Some error\n Reason: foobar is here'),
('Some error', dict(error='baz is there'), 'Some error\n baz is there'),
(
'Some error',
dict(message='foobar is here', error='baz is there'),
'Some error\n Reason: foobar is here\n baz is there'),
(
'Some error',
dict(message='foobar is here', data=dict(errors=dict())),
'Some error\n Reason: foobar is here'
),
(
'Some error',
dict(
message='foobar is here',
data=dict(errors=dict(field_x=['Error for field x!'], field_y=['Error for field y!']))
),
[
'Some error\n Reason: foobar is here\n field_x: Error for field x!\n field_y: Error for field y!',
'Some error\n Reason: foobar is here\n field_y: Error for field y!\n field_x: Error for field x!',
]
), (
'Some error',
dict(
message='foobar is here',
data=dict(errors=dict(field_x=['Error for field x!'], field_y=['Error for field y!'])),
error='baz is there'
),
[
(
'Some error\n Reason: foobar is here'
'\n field_x: Error for field x!\n field_y: Error for field y!\n baz is there'
),
(
'Some error\n Reason: foobar is here'
'\n field_y: Error for field y!\n field_x: Error for field x!\n baz is there'
),
]
),
])
def test_to_string(self, summary, json, expected_str):
response = MagicMock()
response.json.return_value = json
error = RequestError(summary=summary, response=response)
if isinstance(expected_str, list):
assert any(expected == str(error) for expected in expected_str)
else:
assert expected_str == str(error)
```
#### File: yagocd/tests/test_material_manager.py
```python
import pytest
from six import string_types
from tests import AbstractTestManager, ConfirmHeaderMixin, ReturnValueMixin
from yagocd.resources import material
class BaseTestConfigurationManager(AbstractTestManager):
def expected_request_url(self, *args, **kwargs):
raise NotImplementedError()
def expected_request_method(self, *args, **kwargs):
raise NotImplementedError()
def _execute_test_action(self, *args, **kwargs):
raise NotImplementedError()
@pytest.fixture()
def manager(self, session_fixture):
return material.MaterialManager(session=session_fixture)
class TestList(BaseTestConfigurationManager, ReturnValueMixin):
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("material/list") as cass:
return cass, manager.list()
@pytest.fixture()
def expected_request_url(self):
return '/go/api/config/materials'
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert all(isinstance(i, material.MaterialEntity) for i in result)
return check_value
class TestModifications(BaseTestConfigurationManager, ReturnValueMixin):
FINGERPRINT = 'e302ce7f43cd1a5009d218e7d6c1adf6a38fa9a33f6d0054d1607a00209fa810'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("material/modifications") as cass:
return cass, manager.modifications(self.FINGERPRINT)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/materials/{0}/modifications/{1}'.format(
self.FINGERPRINT, 0
)
@pytest.fixture()
def expected_request_method(self):
return 'GET'
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return list
@pytest.fixture()
def expected_return_value(self):
def check_value(result):
assert all(isinstance(i, material.ModificationEntity) for i in result)
return check_value
#
# class TestNotifySvn(BaseTestConfigurationManager):
# UUID = ''
#
# def test_notify_svn_request_url(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn") as cass:
# manager.notify_svn(self.UUID)
# assert cass.requests[0].path == '/material/notify/svn'
#
# def test_notify_svn_request_method(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn") as cass:
# manager.notify_svn(self.UUID)
# assert cass.requests[0].method == 'POST'
#
# def test_notify_svn_request_data(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn") as cass:
# manager.notify_svn(self.UUID)
# assert False # add check for uuid in data
#
# def test_notify_svn_request_accept_headers(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn") as cass:
# manager.notify_svn(self.UUID)
# assert cass.requests[0].headers['accept'] == 'application/json'
#
# def test_notify_svn_response_code(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn") as cass:
# manager.notify_svn(self.UUID)
# assert cass.responses[0]['status']['code'] == 200
#
# def test_notify_svn_return_type(self, manager, my_vcr):
# with my_vcr.use_cassette("material/notify_svn"):
# result = manager.notify_svn(self.UUID)
# assert all(isinstance(i, material.ModificationEntity) for i in result)
class TestNotifyGit(BaseTestConfigurationManager, ReturnValueMixin, ConfirmHeaderMixin):
URL = 'https://github.com/grundic/yagocd.git'
@pytest.fixture()
def _execute_test_action(self, manager, my_vcr):
with my_vcr.use_cassette("material/notify_git") as cass:
return cass, manager.notify_git(self.URL)
@pytest.fixture()
def expected_request_url(self):
return '/go/api/material/notify/git'
@pytest.fixture()
def expected_request_method(self):
return 'POST'
@pytest.fixture()
def expected_response_code(self, *args, **kwargs):
return 202
@pytest.fixture()
def expected_accept_headers(self, server_version):
return 'application/json'
@pytest.fixture()
def expected_return_type(self):
return string_types
@pytest.fixture()
def expected_return_value(self):
return 'The material is now scheduled for an update. Please check relevant pipeline(s) for status.\n'
```
#### File: yagocd/tests/test_pipeline_instance.py
```python
import json
import pytest
from mock import mock
from yagocd.resources import Base
from yagocd.resources import pipeline
from yagocd.resources import stage
from yagocd.resources.pipeline_config import PipelineConfigManager
class TestPipelineEntity(object):
@pytest.fixture()
def pipeline_instance(self, mock_session):
data = json.load(open('tests/fixtures/resources/pipeline/pipeline_instance.json'))
return pipeline.PipelineInstance(session=mock_session, data=data)
@mock.patch('yagocd.resources.pipeline.PipelineInstance.stage')
def test_indexed_based_access(self, stage_mock, pipeline_instance):
name = mock.MagicMock()
_ = pipeline_instance[name] # noqa
stage_mock.assert_called_once_with(name=name)
@mock.patch('yagocd.resources.pipeline.PipelineInstance.stages')
def test_iterator_access(self, stages_mock, pipeline_instance):
for _ in pipeline_instance:
pass
stages_mock.assert_called_once_with()
def test_instance_is_not_none(self, pipeline_instance):
assert pipeline_instance is not None
def test_is_instance_of_base(self, pipeline_instance):
assert isinstance(pipeline_instance, Base)
def test_getting_name(self, pipeline_instance):
assert pipeline_instance.data.name == 'Shared_Services'
def test_getting_url(self, pipeline_instance):
assert pipeline_instance.url == 'http://example.com/go/pipelines/value_stream_map/Shared_Services/2'
def test_getting_pipeline_url(self, pipeline_instance):
assert pipeline_instance.pipeline_url == 'http://example.com/go/tab/pipeline/history/Shared_Services'
def test_stages_are_not_empty(self, pipeline_instance):
assert len(pipeline_instance.stages()) > 0
def test_stages_instances(self, pipeline_instance):
assert all(isinstance(i, stage.StageInstance) for i in pipeline_instance.stages())
@mock.patch('yagocd.resources.pipeline.PipelineInstance.stages')
def test_stage(self, stages_mock, pipeline_instance):
foo = mock.MagicMock()
foo.data.name = 'foo'
bar = mock.MagicMock()
bar.data.name = 'bar'
baz = mock.MagicMock()
baz.data.name = 'baz'
stages_mock.return_value = [foo, bar, baz]
result = pipeline_instance.stage(name='bar')
assert result == bar
@mock.patch('yagocd.resources.pipeline.PipelineManager.value_stream_map')
def test_value_stream_map_call(self, value_stream_map_mock, pipeline_instance):
pipeline_instance.value_stream_map()
value_stream_map_mock.assert_called_with(
name=pipeline_instance.data.name,
counter=pipeline_instance.data.counter
)
def test_config(self, pipeline_instance):
assert isinstance(pipeline_instance.config, PipelineConfigManager)
``` |
{
"source": "1anshu-56/Face-X",
"score": 3
} |
#### File: Snapchat_Filters/Butterfly Glass Filter/Code.py
```python
import cv2
import matplotlib.pyplot as plt
face = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
filename=input("Enter image path here:") #r'C:\Users\xyz\OneDrive\Desktop\images\photo.JPG'
img=cv2.imread(filename)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ey=face.detectMultiScale(gray,1.09,9)
butterfly=cv2.imread('Butterfly.png')
def put_butterflyglass(butterfly, fc, x, y, w, h):
face_width = w
face_height = h
butterfly_width = face_width + 1
butterfly_height = int(0.8 * face_height) + 1
butterfly = cv2.resize(butterfly, (butterfly_width, butterfly_height))
for i in range(butterfly_height):
for j in range(butterfly_width):
for k in range(3):
if butterfly[i][j][k] < 235:
fc[y + i - int(-0.01 * face_height)][x + j][k] = butterfly[i][j][k]
return fc
for (x, y, w, h) in ey:
frame=put_butterflyglass(butterfly,img, x, y, w, h)
cv2.imshow('image',frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
fig = plt.figure()
fig.set_figheight(20)
fig.set_figwidth(10)
plt.title("Butterfly Filter")
plt.imshow(frame)
plt.show()
cv2.waitKey(8000)& 0xff
cv2.destroyAllWindows()
``` |
{
"source": "1A-OneAngstrom/SAMSON-Python-Samples",
"score": 3
} |
#### File: SAMSON-Python-Samples/animations/gif_using_imageio.py
```python
import datetime
import imageio
def create_gif(filenames, duration):
'''
function that creates a gif-file from images thanks to imageio package
'''
images = []
for filename in filenames:
images.append(imageio.imread(filename))
    output_gif_file = 'imageio-%sam.gif' % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')  # generate name for the gif-file
    output_mp4_file = 'imageio-%sam.mp4' % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')  # generate name for the mp4-file
imageio.mimsave(output_gif_file, images, duration=duration) # save clip to the gif-file
imageio.mimsave(output_mp4_file, images) # save clip to the mp4-file
```
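A short usage sketch for `create_gif` — assuming a set of numbered PNG frames already exists on disk (the filename pattern below is a placeholder):
```python
# Sketch only: the frame files are assumed to exist in the working directory.
frames = ['frame-{:03d}.png'.format(i) for i in range(30)]
create_gif(frames, duration=0.1)   # roughly 0.1 s per frame in the resulting GIF
```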
#### File: SAMSON-Python-Samples/postprocessing/export_trajectory.py
```python
from samson.Facade import SAMSON
def export_trajectory(sbpath, filename, fileformat):
'''
Exports trajectories from a Path node 'sbpath' to files with a name starting with 'filename' and extension 'fileformat'
'''
sbpath.currentStep = 0 # set currentStep for the Path to 0
trajectory_files = []
indexer = SAMSON.getNodes('n.t sm') # get a node indexer for all structural models
if indexer.size > 0:
for step in range(sbpath.numberOfSteps): # loop over steps in the Path
sbpath.currentStep = step # increment currentStep
fn = filename + str(step) + '.' + fileformat # a name of a file
trajectory_files.append(fn) # append list of trajectory files
SAMSON.exportToFile(indexer, fn, []) # export current trajectory into a file 'fn'
return trajectory_files # return list of trajectory files
def export_to_PDB(sbpath, filename):
'''
Export trajectory from path into pdb format
'''
return export_trajectory(sbpath, filename, 'pdb')
def export_to_XYZ(sbpath, filename):
'''
Export trajectory from path into xyz format
'''
return export_trajectory(sbpath, filename, 'xyz')
``` |
{
"source": "1arshan/Eyantra_Virgi-bot",
"score": 3
} |
#### File: pkg_ros_iot_bridge/scripts/dispatch.py
```python
import time
import rospy
import datetime
from IotClient.iot_client import IotClient  # temporary import, for testing
def get_item_details(item):
"""
    This function finds packgen details for a given item type.
    :param item: a string containing the packgen content type ('Medicines', 'Food' or 'Clothes').
    :return: a dict with priority, cost, Estimated Time of Delivery and item name.
"""
details = {'Estimated Time of Delivery':'Na','priority':'Na','cost':'Na','item':'Na'}
if item == 'Medicines' or item == 'Medicine':
details = {'priority':'HP','cost':'450','Estimated Time of Delivery':'1','item':'Medicines'}
elif item == 'Food':
details = {'priority':'MP','cost':'250','Estimated Time of Delivery':'3',"item":'Food'}
elif item == 'Clothes':
details = {'priority':'LP','cost':'150','Estimated Time of Delivery':'5','item':'Clothes'}
return details
def get_dispatch_string(order_to_be_procced):
"""
    This function creates a comma-separated string of order information to send to the OrdersDispatched sheet.
    :param order_to_be_procced: a tuple of (order_id, city, item) describing the order being dispatched.
    :return: a string containing each column of the OrdersDispatched sheet separated by commas.
"""
order_details_sheet = get_item_details(order_to_be_procced[2])
dispatch_date = datetime.datetime.now()
dispatch_time_str = dispatch_date.strftime("%a")+' '+dispatch_date.strftime("%b")+' '+dispatch_date.strftime("%d")+' '+dispatch_date.strftime("%Y")+' - '+dispatch_date.strftime("%X")
print(dispatch_time_str,'DispatchTime')
dipatch_order_string = 'OrdersDispatched,'+order_to_be_procced[0]+','+order_to_be_procced[1]+','+order_details_sheet['item']+','+order_details_sheet['priority']+',1,'+order_details_sheet['cost']+',Yes,'+dispatch_time_str
return dipatch_order_string
def pushdata():
orders = [
('1003','Amritsar','Medicines'),
('1001','Delhi','Medicines'),
('2002','Kolkata','Food'),
('2003','Agartala','Food'),
('1002','Hyderabad','Medicines'),
('2001','Chennai','Food'),
('3001','Mumbai','Clothes'),
('3002','Pune','Clothes'),
('3003','Bangalore','Clothes')
]
time_gap = [10, 117,241,95,196,146,206,146,235]
    i = 0
global action_client
for order in orders:
str = get_dispatch_string(order)
goal_handle1 = action_client.send_goal("mqtt", "pub", action_client._config_mqtt_pub_topic, str)
action_client._goal_handles['1'] = goal_handle1
time.sleep(time_gap[i])
i = i+1
if __name__ == '__main__':
rospy.init_node('dispatchorder',anonymous=True)
global action_client
action_client = IotClient()
pushdata()
print("Out of Loop. Exiting..")
```
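For reference, `get_dispatch_string` emits one comma-separated row per order in the fixed column order of the OrdersDispatched sheet; for the first order in the list above it would produce something like the following (the trailing timestamp is generated at call time, so the value shown is only illustrative):
```python
row = get_dispatch_string(('1003', 'Amritsar', 'Medicines'))
# -> 'OrdersDispatched,1003,Amritsar,Medicines,HP,1,450,Yes,Tue Mar 02 2021 - 08:15:42'
```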
#### File: pkg_ros_iot_bridge/scripts/node_action_server_ros_iot_bridge.py
```python
import rospy
import actionlib
import threading
import requests
import re #to split msg receive from action client
import json
from pkg_ros_iot_bridge.msg import msgRosIotAction # Message Class that is used by ROS Actions internally
from pkg_ros_iot_bridge.msg import msgRosIotGoal # Message Class that is used for Goal Messages
from pkg_ros_iot_bridge.msg import msgRosIotResult # Message Class that is used for Result Messages
from pkg_ros_iot_bridge.msg import msgRosIotFeedback # Message Class that is used for Feedback Messages
from pkg_ros_iot_bridge.msg import msgMqttSub # Message Class for MQTT Subscription Messages,communicate with pkg_task1
from pyiot import iot # Custom Python Module to perform MQTT Tasks
import paho.mqtt.client as mqtt #for mqtt
import heapq as hq #heap
from IotClient.iot_client import IotClient  # temporary import, for testing
#from pkg_task5.scripts.IotClient.iot_client import IotClient
import datetime
import json
class IotRosBridgeActionServer:
# Constructor
def __init__(self):
# Initialize the Action Server
self._as = actionlib.ActionServer('/action_iot_ros',
msgRosIotAction,
self.on_goal,
self.on_cancel,
auto_start=False)
'''
* self.on_goal - It is the function pointer which points to a function which will be called
when the Action Server receives a Goal.
* self.on_cancel - It is the function pointer which points to a function which will be called
when the Action Server receives a Cancel Request.
'''
#sheet url id add
# Read and Store IoT Configuration data from Parameter Server
param_config_iot = rospy.get_param('config_iot')
self._config_mqtt_server_url = param_config_iot['mqtt']['server_url']
self._config_mqtt_server_port = param_config_iot['mqtt']['server_port']
self._config_mqtt_sub_topic = param_config_iot['mqtt']['topic_sub']
self._config_mqtt_pub_topic = param_config_iot['mqtt']['topic_pub']
self._config_mqtt_qos = param_config_iot['mqtt']['qos']
self._config_mqtt_sub_cb_ros_topic = param_config_iot['mqtt']['sub_cb_ros_topic']
self._config_google_apps_spread_sheet_id = param_config_iot['google_apps']['spread_sheet_id']
self._keys = param_config_iot['keys']
#print(param_config_iot)
# Initialize ROS Topic Publication
# Incoming message from MQTT Subscription will be published on a ROS Topic (/ros_iot_bridge/mqtt/sub).
# ROS Nodes can subscribe to this ROS Topic (/ros_iot_bridge/mqtt/sub) to get messages from MQTT Subscription.
self._handle_ros_pub = rospy.Publisher(self._config_mqtt_sub_cb_ros_topic, msgMqttSub, queue_size=10)
# Subscribe to MQTT Topic (eyrc/xYzqLm/iot_to_ros) which is defined in 'config_iot_ros.yaml'.
# self.mqtt_sub_callback() function will be called when there is a message from MQTT Subscription.
ret = iot.mqtt_subscribe_thread_start(self.mqtt_sub_callback,
self._config_mqtt_server_url,
self._config_mqtt_server_port,
self._config_mqtt_sub_topic,
self._config_mqtt_qos)
if ret == 0:
rospy.loginfo("MQTT Subscribe Thread Started")
else:
rospy.logerr("Failed to start MQTT Subscribe Thread")
# Start the Action Server
self._as.start()
rospy.loginfo("Started ROS-IoT Bridge Action Server.")
# This is a callback function for MQTT Subscriptions
def mqtt_sub_callback(self, client, userdata, message):
payload = str(message.payload)
#payload = str(.format(message.payload.decode()))
#print("payload",payload)
print("[INFO] Topic: {}".format(message.topic) )
print("[INFO] Message Recieved: {}".format(message.payload.decode()))
print(json.loads(payload),'this')
order_json = json.loads(payload)
# {u'city': u'Mumbai', u'order_time': u'2021-03-02 08:14:33', u'order_id': u'3001', u'lon': u'72.8777 E', u'qty': u'1', u'item': u'Clothes', u'lat': u'19.0760 N'}
# - Order ID
# - Order Date and Time
# - Item
# - Priority
# - Order Quantity
# - City
# - Longitude
# - Latitude
# - Cost
if order_json[u'item'].encode('utf-8')=="Medicines" or order_json[u'item'].encode('utf-8')=="Medicine":
priority ='HP' #0
cost = '450'
elif order_json[u'item'].encode('utf-8')=="Food":
priority='MP' #1
cost = '250'
elif order_json[u'item'].encode('utf-8')=="Clothes":
priority ='LP'
cost = '150' #2
order_to_be_publish = 'IncomingOrders,'+order_json[u'order_id'].encode('utf-8')+','+order_json[u'order_time'].encode('utf-8')+','+order_json[u'item'].encode('utf-8')+','+priority+','+order_json[u'qty'].encode('utf-8')+','+order_json[u'city'].encode('utf-8')+','+order_json[u'lon'].encode('utf-8')+','+order_json[u'lat'].encode('utf-8')+','+cost
#sending ur5_1 msg to pick the required box
# single_order_info=payload.split(',')
# global order_no
# order_no = order_no+1
# order_no = int(single_order_info[6])
# if single_order_info[0]=="Medicines":
# priority ='HP' #0
# elif single_order_info[0]=="Food":
# priority='MP' #1
# else:
# priority ='LP' #2
# tup=(priority,order_no,single_order_info[0]) #to make heap
# global order_info
# # global match_box_color_with_index
# hq.heappush(order_info,tup) #always have highest priority upward
# print("before sending order",order_info)
# order_to_be_procced =hq.heappop(order_info) #order with highest priority
# print("aftersending order",order_info)
# for key, value in match_box_color_with_index.items():
# print(key,value,order_to_be_procced[2])
# if order_to_be_procced[2] == value:
# box_to_be_pick=key #key pta chl gyi
# match_box_color_with_index.pop(key)
# break
# print("match_box_color_with_index: ",match_box_color_with_index)
global action_client #IOT clinet
# incomingDate = datetime.datetime.now()
# incomingTimeStr = incomingDate.strftime("%a")+' '+incomingDate.strftime("%b")+' '+incomingDate.strftime("%d")+' '+incomingDate.strftime("%Y")+' - '+incomingDate.strftime("%X")
# order_to_be_publish='IncomingOrders,'+str(order_no)+','+str(incomingTimeStr)+','+ single_order_info[0]+','+priority+','+single_order_info[2]+','+single_order_info[1]+','+single_order_info[5]+','+single_order_info[4]+','+single_order_info[3]
# print(order_to_be_publish,incomingTimeStr)
action_client.send_goal("mqtt", "pub", action_client._config_mqtt_pub_topic,order_to_be_publish) #send to IOT bridge
# This function will be called when Action Server receives a Goal
def on_goal(self, goal_handle):
goal = goal_handle.get_goal()
rospy.loginfo("Received new goal from Client")
rospy.loginfo(goal) # action
#---pushign to google shhet
# defining our sheet name in the 'id' variable and the the column where we want to update the value
temp =goal.message
# [+-]?\d+
# x = re.findall(r"[\w']+", temp)
# x = re.findall(r"[+-]?\d+", temp)
x = temp.split(',')
# global match_box_color_with_index #to store box color
print(x)
# if x[0] == 'Inventory':
# box_index =x[1]
# box_index=box_index[1:3]
# print(box_index,"box")
# match_box_color_with_index.update({box_index:x[2]}) # dic which will match storage number with box item
# print(match_box_color_with_index,"Ggg")
# print("color with index",match_box_color_with_index)
i = 1
parameters = {"id":x[0],'Team Id':'VB#1637','Unique Id':"VBfetjmi"}
#print(self._keys)
sheet_name = x[0]
keys = self._keys[sheet_name]
for j in keys:
parameters.update({j:x[i]})
i = i+1
print(parameters,'parameters')
# print(parameters,'Params')
# parameters = {"id":"Sheet1", "turtle_x":x[0],"turtle_y":x[1],"turtle_theta":x[2]}
# parameters = {"id":"task1", "turtle_x":x[0],"turtle_y":x[1],"turtle_theta":x[2],"team_id":"VB_1637","unique_id":"VB_1637"}
URL = "https://script.google.com/macros/s/"+self._config_google_apps_spread_sheet_id+"/exec"
response = requests.get(URL, params=parameters)
print(response.content)
# Validate incoming goal parameters
if goal.protocol == "mqtt":
if (goal.mode == "pub") or (goal.mode == "sub"):
goal_handle.set_accepted()
# Start a new thread to process new goal from the client (For Asynchronous Processing of Goals)
# 'self.process_goal' - is the function pointer which points to a function that will process incoming Goals
thread = threading.Thread(name="worker",
target=self.process_goal,
args=(goal_handle,))
thread.start()
else:
goal_handle.set_rejected()
return
else:
goal_handle.set_rejected()
return
# This function is called is a separate thread to process Goal.
def process_goal(self, goal_handle):
flag_success = False
result = msgRosIotResult()
goal_id = goal_handle.get_goal_id()
rospy.loginfo("Processing goal : " + str(goal_id.id))
goal = goal_handle.get_goal()
# Goal Processing
if goal.protocol == "mqtt":
rospy.logwarn("MQTT")
if goal.mode == "pub":
rospy.logwarn("MQTT PUB Goal ID: " + str(goal_id.id))
rospy.logwarn(goal.topic + " > " + goal.message)
ret = iot.mqtt_publish(self._config_mqtt_server_url,
self._config_mqtt_server_port,
goal.topic,
goal.message,
self._config_mqtt_qos)
if ret == 0:
rospy.loginfo("MQTT Publish Successful.")
result.flag_success = True
else:
rospy.logerr("MQTT Failed to Publish")
result.flag_success = False
elif goal.mode == "sub":
rospy.logwarn("MQTT SUB Goal ID: " + str(goal_id.id))
rospy.logwarn(goal.topic)
ret = iot.mqtt_subscribe_thread_start(self.mqtt_sub_callback,
self._config_mqtt_server_url,
self._config_mqtt_server_port,
goal.topic,
self._config_mqtt_qos)
if (ret == 0):
rospy.loginfo("MQTT Subscribe Thread Started")
result.flag_success = True
else:
rospy.logerr("Failed to start MQTT Subscribe Thread")
result.flag_success = False
rospy.loginfo("Send goal result to client")
if (result.flag_success == True):
rospy.loginfo("Succeeded abc")
goal_handle.set_succeeded(result)
# goal_handle.set_succeeded(result)
else:
rospy.loginfo("Goal Failed. Aborting.")
goal_handle.set_aborted(result)
rospy.loginfo("Goal ID: " + str(goal_id.id) + " Goal Processing Done.")
# This function will be called when Goal Cancel request is send to the Action Server
def on_cancel(self, goal_handle):
rospy.loginfo("Received cancel request.")
goal_id = goal_handle.get_goal_id()
# Main
def main():
rospy.init_node('node_action_server_ros_iot_bridge')
action_server = IotRosBridgeActionServer()
global order_info #store information of order
global order_no #order no
order_no = 1000
global action_client
action_client = IotClient()
# global match_box_color_with_index #box colr with index
# match_box_color_with_index={}
# match_box_color_with_index={'02': 'Clothes', '10': 'Clothes', '00': 'Medicines',
# '01': 'Food', '20': 'Clothes', '21': 'Medicines',
# '22': 'Food', '32': 'Medicines', '31': 'Clothes',
# '30': 'Food', '12': 'Medicines', '11': 'Food'} # testing
# order_no=1000
order_info=[]
hq.heapify(order_info)
rospy.spin()
if __name__ == '__main__':
global action_client
main()
```
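The bridge above subscribes to the MQTT order topic configured in `config_iot_ros.yaml` and republishes every incoming order, both to the ROS topic and to the IncomingOrders sheet. A hypothetical test publisher — the broker host and topic below are placeholders; the real values come from the configuration file — only needs to send a JSON payload with the fields the callback reads:
```python
# Sketch only: broker host and topic are placeholders for the configured values.
import json
import paho.mqtt.publish as publish

order = {
    "order_id": "3001", "order_time": "2021-03-02 08:14:33", "item": "Clothes",
    "qty": "1", "city": "Mumbai", "lon": "72.8777 E", "lat": "19.0760 N",
}
publish.single("eyrc/xYzqLm/iot_to_ros", json.dumps(order),
               hostname="broker.example.com", port=1883)
```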
#### File: scripts/temp_for_salim/get_sheet.py
```python
import requests
import json
import heapq as hq #heap
def check_order(order_id,order_info):
for i in order_info:
if i[1] == order_id:
return True
return False
def check_if_dispatched(order_id):
# URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/4/public/full?alt=json" ##<EMAIL>
URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/4/public/full?alt=json" <EMAIL>
#URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/4/public/full?alt=json" <EMAIL>
response = requests.get(URL) #order
data =response.content
res = json.loads(data)
if u'entry' in res["feed"]:
res2 = res["feed"][u'entry']
else:
return False
for x in res2:
content =x[u'content']
content =content[u'$t']
Dict = dict((a.strip(), b.strip())
for a, b in (element.split(': ')
for element in content.split(', ')))
if order_id == Dict[u'orderid'].encode('utf-8'):
return True
return False
def get_data_from_sheet(max_order_id,order_info):
# URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/3/public/full?alt=json" ##<EMAIL>
URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/3/public/full?alt=json" <EMAIL>
#URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/3/public/full?alt=json" <EMAIL>
response = requests.get(URL) #order
data =response.content
res = json.loads(data)
if u'entry' in res["feed"]:
#print("entry present")
res2 = res["feed"][u'entry']
else:
order_to_be_procced=()
#print("no data present")
return order_to_be_procced,max_order_id,order_info
res2 = res["feed"][u'entry']
#order_info=[]
hq.heapify(order_info)
#max_order_id =0
for x in res2:
content =x[u'content']
content =content[u'$t']
Dict = dict((a.strip(), b.strip())
for a, b in (element.split(': ')
for element in content.split(', ')))
if Dict[u'item']=="Medicines" or Dict[u'item']=="Medicine":
Dict[u'priority'] =0 #0
color ="red"
elif Dict[u'item']=="Food":
Dict[u'priority']=1 #1
color ="yellow"
else:
Dict[u'priority'] =2 #2
color ="green"
# if max_order_id < int(Dict[u'orderid']):
order_id_encoded = Dict[u'orderid'].encode('utf-8')
if not check_order(Dict[u'orderid'],order_info) and not check_if_dispatched(order_id_encoded):
max_order_id=int(Dict[u'orderid'])
tup=(Dict[u'priority'],Dict[u'orderid'],Dict[u'item'],Dict[u'city'])
hq.heappush(order_info,tup) #always have highest priority upward
#print(order_info)
if len(order_info)>0:
order_to_be_procced =hq.heappop(order_info) #order with highest priority
else:
order_to_be_procced=()
print("order_to_be_procced",order_to_be_procced)
print("order_info: ", order_info)
return order_to_be_procced,max_order_id,order_info
"""
order_info=[]
hq.heapify(order_info)
max_order_id =0
#order_to_be_procced,max_order_id,order_info =get_data_from_sheet(0,order_info)
#print(order_to_be_procced, max_order_id)
for i in range(8):
order_to_be_procced,max_order_id,order_info =get_data_from_sheet(max_order_id,order_info)
print(order_to_be_procced, max_order_id)
"""
def get_data_from_inventory_sheet():
# URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/2/public/full?alt=json" ##<EMAIL>
URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/2/public/full?alt=json" ##<EMAIL>
#URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/2/public/full?alt=json" ##<EMAIL>
response = requests.get(URL) #inventory
data =response.content
res = json.loads(data)
if u'entry' in res["feed"]:
res2 = res["feed"][u'entry']
else:
match_box_color_with_index ={}
return match_box_color_with_index
res2 = res["feed"][u'entry']
match_box_color_with_index ={}
for x in res2:
content =x[u'content']
content =content[u'$t']
Dict = dict((a.strip(), b.strip())
for a, b in (element.split(': ')
for element in content.split(', ')))
box_index =Dict[u'sku']
box_index=box_index[1:3]
match_box_color_with_index.update({box_index.encode("utf-8"):Dict[u'item'].encode("utf-8")}) # dic which will match storage number with box item
#print(match_box_color_with_index)
return match_box_color_with_index
check_if_dispatched('2002')
```
#### File: scripts/temp_for_salim/server_multi_2.py
```python
import socket
import os
from thread import *
from get_sheet import get_data_from_inventory_sheet,get_data_from_sheet
import json
ServerSideSocket = socket.socket()
host = '127.0.0.1'
port = 2010
ThreadCount = 0
try:
ServerSideSocket.bind((host, port))
except socket.error as e:
print(str(e))
print('Socket is listening..')
ServerSideSocket.listen(15)
global ros_service_status
ros_service_status="free"
def multi_threaded_client(connection):
connection.send(str.encode('Server is working:'))
global ros_service_status
match_box_color_with_index ={}
while True:
data = connection.recv(1024)
response =data.decode('utf-8')
if not data:
break
#print(response)
if response=="inventory":
match_box_color_with_index=get_data_from_inventory_sheet()
print("inventory",match_box_color_with_index)
connection.sendall(str.encode(str(match_box_color_with_index)))
else:
#response=str(response)
print("response: ",response)
response=json.loads(response)
order_to_be_procced,max_order_id,order_info =get_data_from_sheet(response["max_order_id"],response["order_info"])
data_to_be_send={}
data_to_be_send["order_to_be_procced"]=order_to_be_procced
#print("order_to_be_procced",data_to_be_send["order_to_be_procced"])
data_to_be_send["max_order_id"]=max_order_id
data_to_be_send["order_info"]=order_info
print("what data is sending",data_to_be_send)
connection.sendall(str.encode(json.dumps(data_to_be_send)))
#print(ros_service_status)
connection.close()
while True:
Client, address = ServerSideSocket.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(multi_threaded_client, (Client, ))
ThreadCount += 1
print('Thread Number: ' + str(ThreadCount))
ServerSideSocket.close()
```
#### File: scripts/temp_for_salim/server_multi.py
```python
import socket
import os
from thread import *
from get_sheet import get_data_from_inventory_sheet,get_data_from_sheet
import json
ServerSideSocket = socket.socket()
host = '127.0.0.1'
port = 2004
ThreadCount = 0
try:
ServerSideSocket.bind((host, port))
except socket.error as e:
print(str(e))
print('Socket is listening..')
ServerSideSocket.listen(15)
global ros_service_status
ros_service_status="free"
def multi_threaded_client(connection):
connection.send(str.encode('Server is working:'))
global ros_service_status
match_box_color_with_index ={}
while True:
data = connection.recv(1024)
response =data.decode('utf-8')
if not data:
break
#print(response)
if response=="busy":
ros_service_status=response
print("busy",ros_service_status)
connection.sendall(str.encode(str(ros_service_status)))
elif response=="free":
ros_service_status=response
print("free",ros_service_status)
connection.sendall(str.encode(str(ros_service_status)))
elif response=="inventory":
match_box_color_with_index=get_data_from_inventory_sheet()
print("inventory",match_box_color_with_index)
connection.sendall(str.encode(str(match_box_color_with_index)))
continue
else:
#response=="status_check"
#response=ros_service_status
print("statuscheck ",ros_service_status)
connection.sendall(str.encode(str(ros_service_status)))
#print(ros_service_status)
connection.close()
while True:
Client, address = ServerSideSocket.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(multi_threaded_client, (Client, ))
ThreadCount += 1
print('Thread Number: ' + str(ThreadCount))
ServerSideSocket.close()
```
#### File: scripts/temp-ims/inventory.py
```python
import time
import rospy
import datetime
from IotClient.iot_client import IotClient # tesmporry testing
def get_item_details(color):
"""
    This function returns the specification of a packgen based on its color.
    :param color: color of the packgen; possible values are red, yellow and green.
    :return: details, a dict containing item, sku_alpha, priority, cost and Estimated Time of Delivery.
"""
details = {'item':'Na','sku_alpha':'Na','priority':'Na','cost':'Na','Estimated Time of Delivery':'Na'}
if color == 'red':
details = {'item':'Medicines','sku_alpha':'R','priority':'HP','cost':'450','Estimated Time of Delivery':'1'}
elif color == 'yellow':
details = {'item':'Food','sku_alpha':'Y','priority':'MP','cost':'250','Estimated Time of Delivery':'3'}
elif color == 'green':
details = {'item':'Clothes','sku_alpha':'G','priority':'LP','cost':'150','Estimated Time of Delivery':'5'}
return details
def get_inventory_string(color, i, j):
"""
    This function creates a comma-separated string of packgen information to send to the inventory sheet.
    :param color: color of the packgen; it can be red, yellow or green.
    :param i: index of the row of the packgen in the shelf.
    :param j: index of the column of the packgen in the shelf.
    :return: a string containing each column of the inventory sheet separated by commas.
"""
details = get_item_details(color)
date_now = datetime.datetime.now()
storage_number = 'R'+str(i)+' C'+str(j)
return 'Inventory,'+details['sku_alpha']+str(i)+str(j)+date_now.strftime("%m")+date_now.strftime("%y")+','+details['item']+','+details['priority']+','+storage_number+','+details['cost']+',1'
def pushdata():
inventory = [
('red',0,0),
('green',0,1),
('yellow',0,2),
('yellow',1,0),
('green',1,1),
('red',1,2),
('yellow',2,0),
('green',2,1),
('red',2,2),
('yellow',3,0),
('red',3,1),
('green',3,2)
]
global action_client
for inv in inventory:
time.sleep(8)
str = get_inventory_string(inv[0],inv[1],inv[2])
goal_handle1 = action_client.send_goal("mqtt", "pub", action_client._config_mqtt_pub_topic, str)
action_client._goal_handles['1'] = goal_handle1
if __name__ == '__main__':
rospy.init_node('inventory',anonymous=True)
global action_client
action_client = IotClient()
pushdata()
print("Out of Loop. Exiting..")
```
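`get_inventory_string` encodes the SKU as colour letter + row + column + month + two-digit year. For example, for the red packgen at row 0, column 0 pushed in March 2021 (the date is illustrative), the row sent to the sheet would be:
```python
row = get_inventory_string('red', 0, 0)
# -> 'Inventory,R000321,Medicines,HP,R0 C0,450,1'   (assuming the current date is March 2021)
```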
#### File: pkg_task6/scripts/currupted_ur5_1_controller.py
```python
import rospy
import time
import datetime
from pkg_vb_sim.msg import LogicalCameraImage
from pkg_vb_sim.srv import vacuumGripper
from pkg_vb_sim.srv import conveyorBeltPowerMsg
from ur5_moveit.ur5_moveit import Ur5Moveit,define_joint_angle_list,get_item_details
from sensor_msgs.msg import Image
import heapq as hq #heap
from iot_client.iot_client import IotClient
from pkg_task6.msg import OrderDetailArray
import socket #tcp/ip connection
import json
def add_all_boxes(ur5_bot):
"""
    This function adds all boxes in the shelf to the planning scene.
    :param ur5_bot: an instance of the Ur5Moveit class.
:return: null
"""
pose_x=[.28,0,-.28]
pose_y= -0.41 # -.41
pose_z=[1.92,1.66,1.43,1.20]
for x in range(0,3):
for z in range(0,4):
ur5_bot.add_box(pose_x[x],pose_y,pose_z[z],'box'+str(z)+str(x))
def handle_box_clashing(data):
    """
    Callback for /eyrc/vb/logical_camera_1: sets a flag while a box is still under the
    camera so that the next pick waits and boxes do not clash on the conveyor.
    """
    global ur5
    global is_box_in_camera
models_length = len(data.models)
print("inside ros service")
if (models_length == 2):
is_box_in_camera = True
rospy.sleep(1)
else:
is_box_in_camera = False
#time.sleep(10)
def handle_bot1():
"""
    Controls ur5_1: picks boxes from the shelf and places them on the conveyor belt.
:return: null
"""
global ur5
action_client = IotClient()
#connet to local server1
client_multi_socket = socket.socket()
host = '127.0.0.1'
port = 2004
print('Waiting for connection1 response')
try:
client_multi_socket.connect((host, port))
except socket.error as e:
print(str(e))
client_multi_socket.recv(1024)
#----->>>>
#connet to local server2
client_multi_socket2 = socket.socket()
host = '127.0.0.1'
port2 = 2010
print('Waiting for connection2 response')
try:
client_multi_socket2.connect((host, port2))
except socket.error as e:
print(str(e))
client_multi_socket2.recv(1024)
#----->>>>
coordinate_matrix = [
[
define_joint_angle_list(161, -120, 12, -83, 20, 0),
define_joint_angle_list(-124,-91,26,-117,-59,0), #[01]
# define_joint_angle_list(-124, -90, 26, -117, -59, 0),
define_joint_angle_list(56, -115, 2, -70, 123, 0),
],
[
define_joint_angle_list(-54, -97, 82, -166, -128, 0),
define_joint_angle_list(-122, -103, 55, 46, 61, 0),
define_joint_angle_list(55, -84, -82, -17, 123, 0),
],
[
define_joint_angle_list(-55, -98, 88, 7, 125, 0),
define_joint_angle_list(-122, -119, 103, 14, 59, 0),
define_joint_angle_list(52, -85, -83, 173, -130, 0)
],
[
define_joint_angle_list(-56, -94, 119, -27, 127, 0),
define_joint_angle_list(-125,-118,135,-17,55,0), #31
define_joint_angle_list(55,-88,-117,-154,-122,0) #32
# define_joint_angle_list(-117, -118, 134, -17, 66, 0),
# define_joint_angle_list(-161, -93, 118, -27, 22, 0)
]
]
# coordinate_matrix = [
# [
# define_joint_angle_list(164, -117, 11, -84, 17, 0),
# define_joint_angle_list(-123,-86,14,-106,-62,0), #[01]
# define_joint_angle_list(56, -114, 4, -69, 125, 0),
# ],
# [
# define_joint_angle_list(163, -82, -84, -10, 18, 0),
# define_joint_angle_list(123, -62, -96, -21, 58, 0),
# define_joint_angle_list(54, -82, -82, -14, 128, 0),
# ],
# [
# define_joint_angle_list(-54, -96, 116, 158, -125, 0),
# define_joint_angle_list(122, -58, -131, 9, 59, 0),
# define_joint_angle_list(-163, -99, 90, 9, 16, 0)
# ],
# [
# define_joint_angle_list(-54, -93, 120, -27, 125, 0),
# define_joint_angle_list(-123,-120,135,-14,55,0), #31
# define_joint_angle_list(-163,-96,120,-24,16,0) #32
# ]
# ]
add_all_boxes(ur5)
#requesting local server to provide inventory data
msg="inventory"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
res =res.encode('utf-8')
bad_char=["{","}","'"]
for i in bad_char:
res=res.replace(i,'')
match_box_color_with_index={}
l =[]
for element in res.split(','):
for a in element.split(':'):
temp =str(a.strip())
l.append(temp)
#print(json.dumps(l))
match_box_color_with_index[l[0]]=l[1]
l=[]
# ----editing have to delete those which are not picking----
# del match_box_color_with_index['32']
# del match_box_color_with_index['01']
# del match_box_color_with_index['31']
order_info=[]
hq.heapify(order_info)
max_order_id =0
print(order_info,'Order Info')
order_no_number_pub = rospy.Publisher('/eyrc/vb/order_number',OrderDetailArray,queue_size=1)
order_dictionary={}
first_iteration = True
global is_box_in_camera
print(len(order_info),'Ggg',is_box_in_camera)
while first_iteration or len(order_info)>0:
camera1_service = rospy.Subscriber('/eyrc/vb/logical_camera_1',LogicalCameraImage,handle_box_clashing,queue_size=1)
time.sleep(.5)
camera1_service.unregister()
if not is_box_in_camera:
first_iteration = False
order_dictionary["max_order_id"]=max_order_id
order_dictionary["order_info"]=order_info
msg=json.dumps(order_dictionary)
client_multi_socket2.send(str.encode(msg))
res2 = client_multi_socket2.recv(1024)
print("res: ",res2)
#res=str(res)
res2=json.loads(res2)
order_to_be_procced=res2["order_to_be_procced"]
max_order_id=res2["max_order_id"]
order_info=res2["order_info"]
for key, value in match_box_color_with_index.items():
if order_to_be_procced[2] == value:
                    box_to_be_pick=key  # found the matching shelf slot for this item
match_box_color_with_index.pop(key)
break
print(order_to_be_procced, max_order_id)
i =int(box_to_be_pick[0])
z =int(box_to_be_pick[1])
print(i,z,': Box\n')
order_details_sheet = get_item_details(order_to_be_procced[2])
order_details = OrderDetailArray()
order_details.order_id = order_to_be_procced[1].encode("utf-8")
order_details.name = 'packagen'+str(i)+str(z)
order_details.city = order_to_be_procced[3].encode("utf-8")
order_no_number_pub.publish(order_details)
#print(order_details,'string')
global vaccum
status = ur5.hard_set_joint_angles(coordinate_matrix[i][z],4)
print(status,"STATUS")
while not status:
ur5.hard_set_joint_angles(define_joint_angle_list(0, -90, 0, 0, 0, 0),4) #straight up
status = ur5.hard_set_joint_angles(coordinate_matrix[i][z],4)
ur5.attach_box('box'+str(i)+str(z))
# attach_box_thread(True)
#checking if somewhere else ros service is being use
ros_service_status ="busy"
while ros_service_status=="busy":
msg="status_check"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
ros_service_status=str(res.decode('utf-8'))
#print(res.decode('utf-8'))
if ros_service_status=="busy":
rospy.sleep(1)
else:
msg="busy"
client_multi_socket.send(str.encode(msg))
rospy.sleep(.2)
break
#---->>>>>>>
y = vaccum(True)
#print("Vaccum Gripper : ",y)
rospy.sleep(.4)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
ur5.ee_cartesian_translation(0,1.4, 0)
ur5.hard_set_joint_angles(define_joint_angle_list(179,-57,86,-119,-88,0),4)
ur5.detach_box('box'+str(i)+str(z))
#testing--->>>
ros_service_status ="busy"
while ros_service_status=="busy":
msg="status_check"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
ros_service_status=str(res.decode('utf-8'))
#print(res.decode('utf-8'))
if ros_service_status=="busy":
rospy.sleep(1)
else:
msg="busy"
client_multi_socket.send(str.encode(msg))
rospy.sleep(.2)
break
#---->>>>>>>
y = vaccum(False)
#print("Vaccum Gripper: ",y)
rospy.sleep(.4)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
ur5.remove_box('box'+str(i)+str(z))
dispatch_date = datetime.datetime.now()
dispatch_time_str = dispatch_date.strftime("%a")+' '+dispatch_date.strftime("%b")+' '+dispatch_date.strftime("%d")+' '+dispatch_date.strftime("%Y")+' - '+dispatch_date.strftime("%X")
print(dispatch_time_str,'DispatchTime')
dipatch_order_string = 'OrdersDispatched,'+order_to_be_procced[1].encode("utf-8")+','+order_to_be_procced[3].encode("utf-8")+','+order_details_sheet['item']+','+order_details_sheet['priority']+',1,'+order_details_sheet['cost']+',Yes,'+dispatch_time_str
goal_handle1 = action_client.send_goal("mqtt", "pub", action_client.config_mqtt_pub_topic, dipatch_order_string)
action_client.goal_handles['1'] = goal_handle1
#del ur5
else:
rospy.sleep(1)
def main():
rospy.init_node('node_task5_ur5_1', anonymous=True)
global ur5
global is_box_in_camera
is_box_in_camera = False
ur5 = Ur5Moveit('ur5_1')
#rospy.Subscriber('/eyrc/vb/logical_camera_1',LogicalCameraImage,handle_conveyor,queue_size=1)
global vaccum
vaccum = rospy.ServiceProxy('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1', vacuumGripper)
handle_bot1()
rospy.spin()
if __name__ == '__main__':
global ur5
global vaccum
global is_box_in_camera
main()
```
#### File: scripts/handling_ros_service/server_multi.py
```python
import socket
import os
from thread import *
ServerSideSocket = socket.socket()
host = '127.0.0.1'
port = 2004
ThreadCount = 0
try:
ServerSideSocket.bind((host, port))
except socket.error as e:
print(str(e))
print('Socket is listening..')
ServerSideSocket.listen(5)
global ros_service_status
ros_service_status="free"
def multi_threaded_client(connection):
"""
This function create multiple threads for multiple clients
:param connection: New Connection request
:return: null
"""
connection.send(str.encode('Server is working:'))
global ros_service_status
while True:
data = connection.recv(2048)
response =data.decode('utf-8')
if not data:
break
print(response)
if response=="busy":
ros_service_status=response
elif response=="free":
ros_service_status=response
else:
response=ros_service_status
print(response)
connection.sendall(str.encode(str(ros_service_status)))
connection.close()
while True:
Client, address = ServerSideSocket.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(multi_threaded_client, (Client, ))
ThreadCount += 1
print('Thread Number: ' + str(ThreadCount))
```
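This server acts as a tiny mutual-exclusion service for the shared vacuum-gripper ROS service: one client marks itself `busy`, does its work, then sends `free`, while other clients poll with `status_check`. A minimal client sketch (host and port match the constants above) could be:
```python
# Sketch only: client side of the busy / free / status_check protocol above.
import socket
import time

sock = socket.socket()
sock.connect(('127.0.0.1', 2004))
sock.recv(1024)                                  # greeting: 'Server is working:'

while True:                                      # wait until the shared resource is free
    sock.send('status_check'.encode())
    if sock.recv(1024).decode('utf-8') == 'free':
        break
    time.sleep(1)

sock.send('busy'.encode())                       # take the lock
sock.recv(1024)
# ... call the shared ROS service here ...
sock.send('free'.encode())                       # release the lock
sock.recv(1024)
sock.close()
```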
#### File: pkg_cv_example/scripts/temp.py
```python
import rospy
import cv2
import sys
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from pyzbar.pyzbar import decode

class Camera1:

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/eyrc/vb/camera_1/image_raw", Image, self.callback)

    def get_qr_data(self, arg_image):
        """Decode any QR codes in the image and return the first payload, or 'NA'."""
        qr_result = decode(arg_image)
        for i in range(len(qr_result)):
            print(qr_result[i].data)
        if len(qr_result) > 0:
            return qr_result[0].data
        else:
            return 'NA'

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
        (rows, cols, channels) = cv_image.shape
        image = cv_image
        l = []
        # Resize a 720x1280 image to 360x640 to fit it on the screen
        # resized_image = cv2.resize(image, (720/2, 1280/2))
        # Walk the 5x4 grid of shelf slots; each slot is a 160x180 pixel crop.
        for i in range(5):
            for j in range(4):
                down = 160 * i
                side = 180 * j
                crop_image = image[280 + down:440 + down, 90 + side:270 + side]
                l.append(self.get_qr_data(crop_image))
                rospy.loginfo(self.get_qr_data(crop_image))
        # crop_image = image[280:440, 90:270]    # top:bottom, left:right --- pkg00
        crop_image = image[280:440, 450:630]     # top:bottom, left:right --- pkg01
        # crop_image = image[760:920, 100:340]   # top:bottom, left:right --- pkg00
        cv2.imshow("/eyrc/vb/camera_1/image_raw1", image)
        cv2.imshow("/eyrc/vb/camera_1/image_raw", crop_image)
        cv2.waitKey(3)

def main(args):
    rospy.init_node('node_eg1_read_camera', anonymous=True)
    ic = Camera1()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
```
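The pyzbar call used in the callback works the same way outside ROS; a small standalone sketch (the image file name and the chosen slot are illustrative assumptions) that decodes one 160x180 shelf slot using the same offsets:
```python
import cv2
from pyzbar.pyzbar import decode

image = cv2.imread('shelf_capture.png')   # assumed saved frame from camera_1
i, j = 0, 1                               # second slot of the first shelf row
crop = image[280 + 160 * i:440 + 160 * i, 90 + 180 * j:270 + 180 * j]

results = decode(crop)
print(results[0].data if results else 'NA')
```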
{
"source": "1asset/finalprojectpy",
"score": 2
}
#### File: finalprojectpy/src/main.py
```python
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime, timedelta
from functools import wraps
import jwt
from tensorflow.python.framework.ops import name_scope  # imported but unused in this module
from selenium.webdriver.common.by import By  # imported but unused in this module
from paragraphs import Paragraphs

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://postgres:5432@localhost/assignment4"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'thisismyflasksecretkey'
db = SQLAlchemy(app)

# Last token issued by /login, exposed through /token for convenience.
token_save = ""

class Clients(db.Model):
    __tablename__ = 'clients'
    id = db.Column('id', db.Integer, primary_key=True)
    login = db.Column('login', db.Unicode)
    password = db.Column('password', db.Unicode)
    token = db.Column('token', db.Unicode)

    def __init__(self, id, login, password, token):
        self.login = login
        self.password = password
        self.token = token

class News(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    coin_name = db.Column(db.String)
    body = db.Column(db.String)
    title = db.Column(db.String)
    link = db.Column(db.String)

    def __init__(self, coin_name, title, body, link):
        self.coin_name = coin_name
        self.title = title
        self.body = body
        self.link = link

    def __repr__(self):
        return '<title %r>' % self.title

def token_required(f):
    """Decorator that only lets a request through if it carries a token stored in the clients table."""
    @wraps(f)
    def decorated(*args, **kwargs):
        find_token = request.args.get('token')
        if not find_token:
            return '''<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet"
            integrity="<KEY>" crossorigin="anonymous"> <h1 class="text-warning" style="color: black; text-align: center"> You need to input the token </h1>''', 403
        real = Clients.query.filter_by(token=find_token).first()
        if real is None:
            return '''<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet"
            integrity="<KEY>" crossorigin="anonymous"> <h1 class="text-danger" style="color: black; text-align: center"> The token can't be verified... </h1>'''
        return f(*args, **kwargs)
    return decorated

db.create_all()

@app.route('/')
def home():
    return render_template('home.html')

@app.route('/token')
def token():
    return '<h2>Copy paste this token in /protected to be verified: </h2>' + token_save

@app.route('/protected')
@token_required
def protected():
    return '''
    <html>
      <head>
        <link href="https://fonts.googleapis.com/css?family=Nunito+Sans:400,400i,700,900&display=swap" rel="stylesheet">
      </head>
      <style>
        body {
          text-align: center;
          padding: 40px 0;
          background: #EBF0F5;
        }
        h1 {
          color: #88B04B;
          font-family: "Nunito Sans", "Helvetica Neue", sans-serif;
          font-weight: 900;
          font-size: 40px;
          margin-bottom: 10px;
        }
        p {
          color: #404F5E;
          font-family: "Nunito Sans", "Helvetica Neue", sans-serif;
          font-size: 20px;
          margin: 0;
        }
        i {
          color: #9ABC66;
          font-size: 100px;
          line-height: 200px;
          margin-left: -15px;
        }
        .card {
          background: white;
          padding: 60px;
          border-radius: 4px;
          box-shadow: 0 2px 3px #C8D0D8;
          display: inline-block;
          margin: 0 auto;
        }
      </style>
      <body>
        <div class="card">
          <div style="border-radius:200px; height:200px; width:200px; background: #F8FAF5; margin:0 auto;">
            <i class="checkmark">✓</i>
          </div>
          <h1>Success</h1>
          <p>We received your token and it's correct;<br/>You can continue using our website, we verified you!</p>
        </div>
      </body>
    </html>
    '''

@app.route('/login', methods=['GET', 'POST'])
def login():
    if request.method == 'POST':
        usernamedata = Clients.query.filter_by(login=request.form['username'], password=request.form['password']).first()
        if usernamedata is not None:
            # Issue a JWT that expires in 15 minutes and remember it on the user's row.
            token = jwt.encode({'user': usernamedata.login, 'exp': datetime.utcnow() + timedelta(minutes=15)}, app.config['SECRET_KEY'])
            global token_save
            token_save = token
            print(token)
            update_token = Clients.query.filter_by(id=usernamedata.id).first()
            update_token.token = token
            db.session.commit()
            return render_template('web.html')
        else:
            error = 'Invalid login or password!'
            return render_template('login.html', error=error)
    return render_template('login.html')

@app.route("/coin", methods=['POST', 'GET'])
def coin():
    if request.method == 'POST':
        coin_name = request.form['coin'].lower()
        # Serve cached articles from the database when available.
        db_articles = News.query.filter_by(coin_name=coin_name).all()
        if db_articles:
            return render_template('web.html', articles=db_articles)
        # Otherwise scrape fresh articles and cache them.
        coininfo = Paragraphs()
        articles = coininfo.get_p(coin_name)
        for article in articles:
            db.session.add(News(coin_name, article['title'], article['body'], article['link']))
        db.session.commit()
        return render_template('web.html', articles=articles)
    elif request.method == 'GET':
        return render_template('web.html')

if __name__ == '__main__':
    app.run(debug=True)
```
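To make the token flow concrete, a minimal sketch of exercising it with the `requests` library; the credentials and the base URL are placeholder assumptions, while the routes and the `token` query parameter come from the code above.
```python
import requests

BASE = 'http://127.0.0.1:5000'   # default Flask dev-server address (assumption)

# 1. Log in with form-encoded credentials; on success /login issues a JWT that is
#    valid for 15 minutes and stores it on the matching clients row.
requests.post(BASE + '/login', data={'username': 'demo', 'password': 'demo'})  # placeholder account

# 2. /token echoes the most recently issued token back as HTML; copy it from there.
print(requests.get(BASE + '/token').text)

# 3. /protected returns the success page only when ?token= matches a stored token.
token = '<paste the token shown by /token here>'
response = requests.get(BASE + '/protected', params={'token': token})
print(response.status_code)   # 200 for a known token, 403 if the parameter is missing
```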