| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
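Each row below holds one fill-in-the-middle (FIM) example: a source file is split so that `prefix + middle + suffix` reconstructs the original text, and `fim_type` records how the hole was chosen (`identifier_name`, `identifier_body`, `random_line_split`, or `conditional_block`). A minimal consumption sketch follows; the dataset path and the FIM sentinel strings in it are placeholder assumptions, not values taken from this dataset.

```python
# Minimal sketch: the repository id below is a placeholder -- substitute the
# actual path of this parquet split before running.
from datasets import load_dataset

ds = load_dataset("user/fim-code-dataset", split="train")  # placeholder id

def reassemble(row: dict) -> str:
    """Recover the original source text of one example."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict) -> str:
    """Arrange one row in prefix-suffix-middle order for FIM training.

    The sentinel strings are illustrative placeholders; use the tokens
    expected by the model being trained.
    """
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

row = ds[0]
print(row["file_name"], row["fim_type"])   # e.g. "amap.go identifier_name"
print(reassemble(row)[:120])               # first 120 chars of the restored file
```

In practice the sentinel tokens and the prefix/suffix ordering should match whatever FIM convention the target model was trained with.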
amap.go | "`
Pname string `json:"pname"`
Poiweight []interface{} `json:"poiweight"`
Postcode []interface{} `json:"postcode"`
Recommend string `json:"recommend"`
Shopid []interface{} `json:"shopid"`
Shopinfo string `json:"shopinfo"`
Tag []interface{} `json:"tag"`
Tel string `json:"tel"`
Timestamp []interface{} `json:"timestamp"`
Type string `json:"type"`
Typecode string `json:"typecode"`
Website []interface{} `json:"website"`
}
func (p Poi) String() string {
return fmt.Sprintln(spaceD(p.ID), spaceD(p.Name), spaceD(p.Type), spaceD(p.Typecode), spaceD(p.Address), spaceD(p.Cityname), spaceD(p.Adname), spaceD(p.Location), spaceD(p.Alias))
}
func spaceD(s string) string {
return strings.Join(strings.Fields(s), "")
}
// Point Point
type Point struct {
Lng float64
Lat float64
}
// Rectangle Rectangle
type Rectangle struct {
PointLT Point
PointRB Point
}
func (r Rectangle) check() bool {
return r.PointLT.Lng < r.PointRB.Lng && r.PointLT.Lat > r.PointRB.Lat
}
func (r Rectangle) polygon() string {
return fmt.Sprintf("%f,%f|%f,%f", r.PointLT.Lng, r.PointLT.Lat, r.PointRB.Lng, r.PointRB.Lat)
}
func (r Rectangle) quadtree() []Rectangle {
halflng, halflat := math.Abs(r.PointRB.Lng-r.PointLT.Lng)/2, math.Abs(r.PointLT.Lat-r.PointRB.Lat)/2
return []Rectangle{
{r.PointLT, Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}},
{Point{round(r.PointLT.Lng + halflng), r.PointLT.Lat}, Point{r.PointRB.Lng, round(r.PointLT.Lat - halflat)}},
{Point{r.PointLT.Lng, round(r.PointLT.Lat - halflat)}, Point{round(r.PointLT.Lng + halflng), r.PointRB.Lat}},
{Point{round(r.PointLT.Lng + halflng), round(r.PointLT.Lat - halflat)}, r.PointRB}}
}
type minRec struct {
Rec Rectangle
Types string
Count int
Err error
}
type minRecPage struct {
Rec Rectangle
Types string
Page string
}
func round(f float64) float64 {
n10 := math.Pow10(6)
return math.Trunc(f*n10) / n10
}
var gaoDePolygonURL = "https://restapi.amap.com/v3/place/polygon"
var gaoDeDetailURL = "https://www.amap.com/detail/get/detail"
var key = "aaa8abdaf05433e3702eae99964cc8c6"
// var key = "935c7385f239000f98ade53bbbc002e7"
func cutRec(rec Rectangle, types string) (recCutresult []minRec) {
count, err := recCount(rec, types)
if err != nil {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count <= 800 && count > 0 {
fmt.Println(rec, types, count, err)
recCutresult = append(recCutresult, minRec{rec, types, count, err})
} else if count > 800 {
// fmt.Println("cuting:", rec, types, count, err)
rec4s := rec.quadtree()
for _, rec4 := range rec4s {
recCutresult = append(recCutresult, cutRec(rec4, types)...)
}
}
return
}
func recCount(rec Rectangle, types string) (count int, err error) {
para := map[string]string{
"types": types,
"offset": "1",
"polygon": rec.polygon(),
}
poiResult1, err := recRequest(para)
if err != nil {
return
}
count, err = strconv.Atoi(poiResult1.Count)
if err != nil {
return
}
return
}
func minRecPagePois(minRecPage minRecPage) (pois []Poi, err error) {
para := map[string]string{
"types": minRecPage.Types,
"offset": "20",
"polygon": minRecPage.Rec.polygon(),
"page": minRecPage.Page,
}
result, err := recRequest(para)
if err != nil {
return
}
pois = result.Pois
return
}
func minRecPagesPois(minRecPages []minRecPage) (pois []Poi) {
for _, minRecPage := range minRecPages {
pagePois, err := minRecPagePois(minRecPage)
if err == nil {
pois = append(pois, pagePois...)
} else {
fmt.Println(minRecPages, err)
}
}
return
}
func minRecPages(mRec minRec) (minRecPages []minRecPage) {
for page := int(math.Ceil(float64(mRec.Count) / 20)); page > 0; page-- {
minRecPages = append(minRecPages, minRecPage{mRec.Rec, mRec.Types, strconv.Itoa(page)})
}
return
}
func | (mRecs []minRec) (mrp []minRecPage) {
for _, mRec := range mRecs {
mrp = append(mrp, minRecPages(mRec)...)
}
return
}
func recTypePages(rec Rectangle, types string) (mrp []minRecPage) {
cutrec := cutRec(rec, types)
mrp = minRecsPages(cutrec)
return
}
// RecTypePois RecTypePois
func RecTypePois(rec Rectangle, types string) (pois []Poi) {
pages := recTypePages(rec, types)
pois = minRecPagesPois(pages)
return
}
func recRequest(para map[string]string) (result PoiResult, err error) {
para["key"] = key
resp, err := resty.
SetTimeout(10 * time.Second).
SetRetryCount(5).
SetRetryWaitTime(10 * time.Second).
SetRetryMaxWaitTime(65 * time.Second).
R().
SetQueryParams(para).
Get(gaoDePolygonURL)
if err != nil {
return
}
json.Unmarshal(resp.Body(), &result)
if err != nil {
return
}
if result.Status != "1" || result.Infocode != "10000" {
err = fmt.Errorf(result.Status, result.Infocode, result.Info)
return
}
return
}
// Detail Detail
type Detail struct {
Status string `json:"status"`
Data struct {
Base struct {
PoiTag string `json:"poi_tag"`
Code string `json:"code"`
ImportanceVipFlag int `json:"importance_vip_flag"`
CityAdcode string `json:"city_adcode"`
Telephone string `json:"telephone"`
NewType string `json:"new_type"`
CityName string `json:"city_name"`
NewKeytype string `json:"new_keytype"`
Checked string `json:"checked"`
Title string `json:"title"`
CreFlag int `json:"cre_flag"`
StdTTag0V string `json:"std_t_tag_0_v"`
NaviGeometry string `json:"navi_geometry"`
Classify string `json:"classify"`
Business string `json:"business"`
ShopInfo struct {
Claim int `json:"claim"`
} `json:"shop_info"`
PoiTagHasTTag int `json:"poi_tag_has_t_tag"`
Pixelx string `json:"pixelx"`
Pixely string `json:"pixely"`
Geodata struct {
Aoi []struct {
Name string `json:"name"`
Mainpoi string `json:"mainpoi"`
Area float64 `json:"area"`
} `json:"aoi"`
} `json:"geodata"`
Poiid string `json:"poiid"`
Distance int `json:"distance"`
Name string `json:"name"`
StdVTag0V string `json:"std_v_tag_0_v"`
EndPoiExtension string `json:"end_poi_extension"`
Y string `json:"y"`
X string `json:"x"`
Address string `json:"address"`
Bcs string `json:"bcs"`
Tag string `json:"tag"`
} `json:"base"`
Spec struct {
MiningShape | minRecsPages | identifier_name |
models.py | ("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
record = self.create(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print(day)
# 今天的日期加上
today = timezone.now().date() + timedelta(daydelta)
print(today,"这个时间加上一个时间段")
# 明天
end = today + timedelta(1)
print(end,"today加上一天,表示的是那一天的一整天的时间段")
return self.filter(record_time__range=(today, end))
# 第一天是否存在打卡记录
# user对某punch点赞
def praise_punch(self, user_id, punch_id):
try:
praise = RunningPunchPraise(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.praise += 1
record.save()
except Exception:
pass
# user对某punch举报
def report_punch(self, user_id, punch_id):
try:
praise = RunningPunchReport(user_id=user_id, punch_id=punch_id)
praise.save()
record = self.get(punch_id=punch_id)
record.report += 1
record.save()
except Exception:
pass
# 是否存在某user对某punch的点赞
def exist_praise_punch(self, user_id, punch_id):
record = RunningPunchPraise.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
# 是否存在某user对某punch的点赞
def exist_report_punch(self, user_id, punch_id):
record = RunningPunchReport.objects.filter(user_id=user_id, punch_id=punch_id)
if record:
return True
else:
return False
class RunningPunchRecord(models.Model):
""" Model for running task record
To save user's actual running distance per day
"""
# 主键ID,标识打卡记录
punch_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# 外键ID,标识对应目标
goal = models.ForeignKey(RunningGoal, related_name="punch", on_delete=models.PROTECT)
# Time when user creates the record
record_time = models.DateTimeField(null=False)
# 截图的引用地址
# voucher_ref = models.CharField(max_length=256, null=False)
voucher_ref = models.TextField(null=False)
# 截图的存储地址
voucher_store = models.TextField(null=False)
# 跑步距离
distance = models.FloatField(default=0)
# 被赞数
praise = models.IntegerField(default=0)
# 被举报数
report = models.IntegerField(default=0)
# 保存的一段话
document = models.TextField(default=" ", null=True)
# 重新打卡
reload = models.IntegerField(default=0, null=True)
# 指定一个Manager
objects = RunningPunchRecordManager()
# 点赞
class RunningPunchPraise(models.Model):
# 点赞的人
user_id = models.IntegerField()
# punch id
punch_id = models.UUIDField()
class Meta:
unique_together = ("punch_id", "user_id")
# 举报
class RunningPunchReport(models.Model):
# 举报的人
user_id = models.IntegerField(null=False, default=0)
# punch id
punch_id = models.UUIDField(null=False, default=uuid.uuid4)
class Meta:
unique_together = ("punch_id", "user_id")
| identifier_body |
||
models.py | = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id).filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
| te(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
:return:
"""
# 判断现在的时间距离开始时间的时长
# day = (timezone.now()-self.recod_time)
# print | record = self.crea | identifier_name |
models.py | filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
record = self.create(goal=goal, voucher_ref=file_refpath, voucher_store=file_filepath, distance=distance,record_time = punch_record_time,
document=document)
print(555555555555555555555555555555555555555)
# 如果是自由模式, 则计算剩余距离
if not goal.goal_type:
goal.left_distance -= distance
goal.save()
return record
#
# 获取时间
def get_day_record(self, daydelta):
"""
:param day: 表示一个timedelta
| 21: 18,
30: 25,
61: 50
}
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id). | conditional_block |
|
models.py | start_time = timezone.now() # + timedelta(days=1)
# start_time = datetime.strptime("2018-01-01 00:00:01", "%Y-%m-%d %H:%M:%S")
kilos_day, goal_distance, left_distance = None, None, None
if running_type:
kilos_day = distance
else:
actual_day_map = {
7: 6,
14: 12,
21: 18,
30: 25,
61: 50
}
goal_distance = distance
left_distance = distance
distances = int(distance)
kilos_day = 2 * distances // actual_day_map[goal_day]
# 查询出没有支付的活动
goal = self.filter(user_id=user_id).filter(start_time=start_time).filter(status="PENDING")
# 如果存在的话就删掉
if goal:
goal.first().delete()
goal = self.create(user_id=user_id,
activity_type=RunningGoal.get_activity(),
start_time=start_time,
goal_day=goal_day,
mode=mode,
guaranty=guaranty,
down_payment=down_payment,
activate_deposit=activate_deposit,
coefficient=coefficient,
goal_type=running_type,
goal_distance=goal_distance,
left_distance=left_distance,
kilos_day=kilos_day,
extra_earn=extra_earn,
average=average,
reality_price=reality_price,
deserve_price=deserve_price,
down_num=down_num
)
# 更新活动的免签卡券
if running_type:
nosgin_number = int(nosign)
UserTicket.objects.create_ticket(goal.goal_id, "NS", nosgin_number)
return goal
# 删除一个目标
def delete_goal(self, goal_id):
goal = self.get(goal_id=goal_id)
# 删除本目标对应的所有打卡记录
goal.punch.all().delete()
# 删除本目标
goal.delete()
class RunningGoal(Goal):
""" Model for running goal
User needs to set running duration days and distance as
objective
"""
# 目标距离
goal_distance = models.FloatField(null=True)
# 单日目标距离,对于自由模式来说,kilos_day为单日目标上限
kilos_day = models.FloatField(null=True)
# 剩余距离, 只针对自由模式有效
left_distance = models.FloatField(null=True)
# 用户实际要付出的金额
reality_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 用户应该要付出的金额
deserve_price = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 扣完底金需要的次数
down_num = models.IntegerField(default=1, null=False)
# 平均每次要扣的
average = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 活动押金
activate_deposit = models.DecimalField(max_digits=12, decimal_places=2, null=False)
# 累计距离,只对自由模式有效
add_distance = models.FloatField(default=0,null=True)
# 活动额外收益
extra_earn = models.DecimalField(max_digits=12, decimal_places=2, null=False)
objects = RunningGoalManager()
@staticmethod
def get_start_date():
return datetime.strptime("00:01", "%H:%M").time()
def calc_pay_out(self):
print("计算开始..........")
pay_out = 0
# 如果是日常模式
if self.goal_type==1:
# 如果之前没有过不良记录, 则扣除保证金
if self.none_punch_days == 0:
pay_out = self.guaranty
print(pay_out,'如果之前没有过不良记录, 则扣除保证金,扣除金额就是保证金的数量')
# 清除个人的保证金数额
self.guaranty = 0
print("将保证金改为0")
# 增加不良记录天数
self.none_punch_days = 1
elif self.none_punch_days >= 1 and self.down_payment > 0:
print("如果不良天数不等于1")
#防止修改数据库debug而出现的错误
self.guaranty = 0
# 如果是日常模式
if self.guaranty == 0:
# 底金次数
pay_out = self.average
print(pay_out, "当保证金等于0的时候需要扣除的底金金额")
# 如果有降低投入
# 从账户中扣除金额
self.down_payment -= pay_out
print("扣除之后需要将用户的底金减去")
# 不良天数记录+1
self.none_punch_days += 1
# 如果是自由模式
else:
print("若是自由模式,开始扣款")
if float(self.left_distance) > 0.0:
print("当剩余距离大于0的时候才开始扣款")
#剩余的距离
left_distance = self.left_distance
# 求解剩余距离
if left_distance<=1:
pay_out = self.guaranty
print("当剩余的距离小于1的时候,直接扣除用户的保证金{}".format(self.guaranty))
self.guaranty = 0
else:
remain = math.floor(self.left_distance)-1
print("剩余的距离减去1是:{}".format(remain))
if remain <=self.down_num:
print(type(remain),type(self.down_num),"remain:{},down_num{}".format(remain,self.down_num))
print("走这里就对了")
pay_out = remain*self.average+self.guaranty
self.guaranty=0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance)-1),"平均需要扣除的金额{}".format(self.average))
self.down_payment -= remain * self.average
else:
# remain = self.down_num
print("若剩余距离大于底金次数,那么剩余距离{}".format(remain))
pay_out = self.down_num * self.average + self.guaranty
self.guaranty = 0
print("用户的剩余距离减去1之后的距离数{}".format(math.floor(self.left_distance) - 1),
"平均需要扣除的金额{}".format(self.average))
self.down_payment -= self.down_num*self.average
else:
pay_out = 0
print("当剩余的距离大于零的时候,需要付出的金额就是保证金")
if pay_out > 0:
# 更新值
self.save()
# 把本次瓜分金额写入数据库记录中
UserSettlement.objects.loose_pay(goal_id=self.goal_id, bonus=pay_out)
print("瓜分记录写入成功")
# 完成所有瓜分金额的计算
return pay_out
@staticmethod
def get_activity():
return "1"
def update_activity(self, user_id):
# 更新该种活动的总系数
Activity.objects.add_bonus_coeff(RunningGoal.get_activity(), self.guaranty + self.down_payment,
self.coefficient)
# 增加用户的累计参加次数
UserRecord.objects.update_join(user=UserInfo.objects.get(user_id=user_id), coeff=self.coefficient)
def update_activity_person(self):
Activity.objects.update_person(RunningGoal.get_activity())
Activity.objects.update_coeff(RunningGoal.get_activity(), -self.coefficient)
import base64
# TODO
class RunningPunchRecordManager(models.Manager):
# 创建一个新的record
def create_record(self, goal, filename, distance,punch_record_time, document,base64_str):
print(3333333333333333333333333333333333333333)
# 文件存储的实际路径
filePath = os.path.join(settings.MEDIA_DIR, timezone.now().strftime("%Y-%m-%d")+"/")
# # 引用所使用的路径
refPath = os.path.join(settings.MEDIA_ROOT, timezone.now().strftime("%Y-%m-%d")+"/")
#mysql存储的地址
file_filepath = filePath+filename
file_refpath = refPath+filename
if not os.path.exists(filePath):
os.makedirs(filePath)
print(444444444444444444444444444444444)
# 写入文件内容
with open(filePath+filename, 'wb') as f:
f.write(base64_str)
print("保存图片成功")
# 如果是日常模式打卡,则规定distance必须为日常距离
if goal.goal_type:
distance = goal.kilos_day
print(666666666666666666666666666666666666)
| if settings.DEBUG:
start_time = timezone.now()
else:
# 当天创建活动只有后一天才能参加,所以以后一天为开始日期 | random_line_split |
|
j1f.rs | (ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
let sign: bool;
ix = x.to_bits();
sign = (ix >> 31) != 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
return -1.0 / 0.0;
}
if (ix >> 31) != 0 {
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansions of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
* S = 1 + ps0*s^2 + ... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736 | common | identifier_name |
|
j1f.rs | 60174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() | {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
} | identifier_body |
|
j1f.rs | ix = x.to_bits();
sign = (ix >> 31) != 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
return -1.0 / 0.0;
}
if (ix >> 31) != 0 {
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansions of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
* S = 1 + ps0*s^2 + ... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935 | let sign: bool;
| random_line_split |
|
postgres.go | dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error |
// GetAllConfigs gets all of the userconfig.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest alertmanager config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current alertmanager config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
new := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, new)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all alertmanager configs for all users.
func (d | {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
} | identifier_body |
postgres.go | migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the userconfig.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest alertmanager config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current alertmanager config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
new := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, new)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all alertmanager configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the alertmanager configs that have changed since a given config.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets a deletedAt for configuration
// by adding a single new row with deleted_at set
// the same as SetConfig is actually insert
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil { | return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
} | random_line_split |
|
postgres.go | dev and production
type DB struct {
dbProxy
squirrel.StatementBuilderType
}
type dbProxy interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
Prepare(query string) (*sql.Stmt, error)
}
// dbWait waits for database connection to be established
func dbWait(db *sql.DB) error {
deadline := time.Now().Add(dbTimeout)
var err error
for tries := 0; time.Now().Before(deadline); tries++ {
err = db.Ping()
if err == nil {
return nil
}
level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
}
// New creates a new postgres DB
func New(uri, migrationsDir string) (DB, error) {
db, err := sql.Open("postgres", uri)
if err != nil {
return DB{}, errors.Wrap(err, "cannot open postgres db")
}
if err := dbWait(db); err != nil {
return DB{}, errors.Wrap(err, "cannot establish db connection")
}
if migrationsDir != "" {
// Add file scheme if no scheme is present
if !strings.HasPrefix(migrationsDir, "file:") {
migrationsDir = "file:" + migrationsDir
}
m, err := migrate.New(migrationsDir, uri)
if err != nil {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the userconfig.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest alertmanager config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current alertmanager config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
new := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, new)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil |
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all rules configs for all users.
func (d | {
return nil, err
} | conditional_block |
postgres.go | , "database migrations failed")
}
level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
return DB{
dbProxy: db,
StatementBuilderType: statementBuilder(db),
}, err
}
var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith
func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) {
rows, err := d.Select("id", "owner_id", "config", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.View{}
for rows.Next() {
var cfg userconfig.View
var cfgBytes []byte
var userID string
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config)
if err != nil {
return nil, err
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetConfig gets a configuration.
func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) {
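	// The newest row (highest id) for this owner wins; deleted_at marks a soft delete.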
var cfgView userconfig.View
var cfgBytes []byte
var deletedAt pq.NullTime
err := d.Select("id", "config", "deleted_at").
From("configs").
Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}).
OrderBy("id DESC").
Limit(1).
QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt)
if err != nil {
return cfgView, err
}
cfgView.DeletedAt = deletedAt.Time
err = json.Unmarshal(cfgBytes, &cfgView.Config)
return cfgView, err
}
// SetConfig sets a configuration.
func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error {
if !cfg.RulesConfig.FormatVersion.IsValid() {
return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion)
}
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "config").
Values(userID, entityType, subsystem, cfgBytes).
Exec()
return err
}
// GetAllConfigs gets all of the user configs.
func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) {
return d.findConfigs(allConfigs)
}
// GetConfigs gets all of the configs that have changed recently.
func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) {
return d.findConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// GetRulesConfig gets the latest rules config for a user.
func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) {
current, err := d.GetConfig(ctx, userID)
if err != nil {
return userconfig.VersionedRulesConfig{}, err
}
cfg := current.GetVersionedRulesConfig()
if cfg == nil {
return userconfig.VersionedRulesConfig{}, sql.ErrNoRows
}
return *cfg, nil
}
// SetRulesConfig sets the current rules config for a user.
func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) {
updated := false
err := d.Transaction(func(tx DB) error {
current, err := d.GetConfig(ctx, userID)
if err != nil && err != sql.ErrNoRows {
return err
}
// The supplied oldConfig must match the current config. If no config
// exists, then oldConfig must be nil. Otherwise, it must exactly
// equal the existing config.
if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) {
return nil
}
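		// Keep the existing alertmanager config; only the rules portion is replaced.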
new := userconfig.Config{
AlertmanagerConfig: current.Config.AlertmanagerConfig,
RulesConfig: newConfig,
}
updated = true
return d.SetConfig(ctx, userID, new)
})
return updated, err
}
// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the
// set of all active rules configurations across all our users.
func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) {
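	// Only the rules-related JSON fields are selected here, so the (potentially large)
	// alertmanager part of the config column never has to be unmarshalled.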
rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at").
Options("DISTINCT ON (owner_id)").
From("configs").
Where(filter).
// `->>` gets a JSON object field as text. When a config row exists
// and alertmanager config is provided but ruler config has not yet
// been, the 'rules_files' key will have an empty JSON object as its
// value. This is (probably) the most efficient way to test for a
// non-empty `rules_files` key.
//
// This whole situation is way too complicated. See
// https://github.com/cortexproject/cortex/issues/619 for the whole
// story, and our plans to improve it.
Where("config ->> 'rules_files' <> '{}'").
OrderBy("owner_id, id DESC").
Query()
if err != nil {
return nil, err
}
defer rows.Close()
cfgs := map[string]userconfig.VersionedRulesConfig{}
for rows.Next() {
var cfg userconfig.VersionedRulesConfig
var userID string
var cfgBytes []byte
var rfvBytes []byte
var deletedAt pq.NullTime
err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt)
if err != nil {
return nil, err
}
err = json.Unmarshal(cfgBytes, &cfg.Config.Files)
if err != nil {
return nil, err
}
// Legacy configs don't have a rule format version, in which case this will
// be a zero-length (but non-nil) slice.
if len(rfvBytes) > 0 {
err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion)
if err != nil {
return nil, err
}
}
cfg.DeletedAt = deletedAt.Time
cfgs[userID] = cfg
}
// Check for any errors encountered.
err = rows.Err()
if err != nil {
return nil, err
}
return cfgs, nil
}
// GetAllRulesConfigs gets all rules configs for all users.
func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(allConfigs)
}
// GetRulesConfigs gets all the rules configs that have changed since a given config ID.
func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) {
return d.findRulesConfigs(squirrel.And{
allConfigs,
squirrel.Gt{"id": since},
})
}
// SetDeletedAtConfig sets deleted_at for a configuration by adding a single
// new row with deleted_at set. Like SetConfig, this is actually an insert
// rather than an update.
func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error {
cfgBytes, err := json.Marshal(cfg)
if err != nil {
return err
}
_, err = d.Insert("configs").
Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config").
Values(userID, entityType, subsystem, deletedAt, cfgBytes).
Exec()
return err
}
// DeactivateConfig deactivates a configuration.
func (d DB) DeactivateConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config)
}
// RestoreConfig restores configuration.
func (d DB) RestoreConfig(ctx context.Context, userID string) error {
cfg, err := d.GetConfig(ctx, userID)
if err != nil {
return err
}
return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config)
}
// Transaction runs the given function in a postgres transaction. If fn returns
// an error the txn will be rolled back.
func (d DB) | Transaction | identifier_name |
|
chroot.go | if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, returns a cleanup function to revert everything, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
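	// The resolv.conf bind mount keeps DNS resolution working inside the chroot.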
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
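	// Exit code 101 makes Debian's invoke-rc.d skip starting services during package installs.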
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
|
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data) | random_line_split |
|
chroot.go | temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data)
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, returns a cleanup function to revert everything, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) | if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
| {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/") | identifier_body |
chroot.go | (rootfs string, mounts []ChrootMount) error {
// Create a temporary mount path
err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err)
}
for i, mount := range mounts {
// Target path
tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i))
// Create the target mountpoint
if mount.IsDir {
err := os.MkdirAll(tmpTarget, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err)
}
} else {
f, err := os.Create(tmpTarget)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err)
}
f.Close()
}
// Mount to the temporary path
err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data)
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
return nil
}
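// moveMounts relocates the staged mounts from /.distrobuilder/<n> into their final
// targets inside the chroot, resolving symlinked targets along the way.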
func moveMounts(mounts []ChrootMount) error {
for i, mount := range mounts {
// Source path
tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i))
// Resolve symlinks
target := mount.Target
for {
// Get information on current target
fi, err := os.Lstat(target)
if err != nil {
break
}
// If not a symlink, we're done
if fi.Mode()&os.ModeSymlink == 0 {
break
}
// If a symlink, resolve it
newTarget, err := os.Readlink(target)
if err != nil {
break
}
target = newTarget
}
// If the target's parent directory is a symlink, we need to resolve that as well.
targetDir := filepath.Dir(target)
if lxd.PathExists(targetDir) {
// Get information on current target
fi, err := os.Lstat(targetDir)
if err != nil {
return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err)
}
// If a symlink, resolve it
if fi.Mode()&os.ModeSymlink != 0 {
newTarget, err := os.Readlink(targetDir)
if err != nil {
return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err)
}
targetDir = newTarget
}
}
// Create parent paths if missing
err := os.MkdirAll(targetDir, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", targetDir, err)
}
// Create target path
if mount.IsDir {
err = os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("Failed to create directory %q: %w", target, err)
}
} else {
err := os.WriteFile(target, nil, 0644)
if err != nil {
return fmt.Errorf("Failed to create file %q: %w", target, err)
}
}
// Move the mount to its destination
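		// MS_MOVE atomically transplants the existing mount point to the new target.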
err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "")
if err != nil {
return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err)
}
}
// Cleanup our temporary path
err := os.RemoveAll(filepath.Join("/", ".distrobuilder"))
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, returns a cleanup function to revert everything, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
| setupMounts | identifier_name |
|
chroot.go | distrobuilder"), err)
}
return nil
}
func killChrootProcesses(rootfs string) error {
// List all files under /proc
proc, err := os.Open(filepath.Join(rootfs, "proc"))
if err != nil {
return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err)
}
dirs, err := proc.Readdirnames(0)
if err != nil {
return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err)
}
// Get all processes and kill them
re := regexp.MustCompile(`\d+`)
for _, dir := range dirs {
if re.MatchString(dir) {
link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root"))
if link == rootfs {
pid, _ := strconv.Atoi(dir)
err = unix.Kill(pid, unix.SIGKILL)
if err != nil {
return fmt.Errorf("Failed killing process: %w", err)
}
}
}
}
return nil
}
// SetupChroot sets up mounts and files, returns a cleanup function to revert everything, and then chroots for you.
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {
// Mount the rootfs
err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "")
if err != nil {
return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err)
}
// Setup all other needed mounts
mounts := []ChrootMount{
{"none", "/proc", "proc", 0, "", true},
{"none", "/sys", "sysfs", 0, "", true},
{"none", "/run", "tmpfs", 0, "", true},
{"none", "/tmp", "tmpfs", 0, "", true},
{"none", "/dev", "tmpfs", 0, "", true},
{"none", "/dev/shm", "tmpfs", 0, "", true},
{"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false},
}
// Keep a reference to the host rootfs and cwd
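	// The open directory descriptor is what later lets the exit function escape the
	// chroot via root.Chdir() followed by unix.Chroot(".").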
root, err := os.Open("/")
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Setup all needed mounts in a temporary location
if len(m) > 0 {
err = setupMounts(rootfs, append(mounts, m...))
} else {
err = setupMounts(rootfs, mounts)
}
if err != nil {
return nil, fmt.Errorf("Failed to mount filesystems: %w", err)
}
// Chroot into the container's rootfs
err = unix.Chroot(rootfs)
if err != nil {
root.Close()
return nil, err
}
err = unix.Chdir("/")
if err != nil {
return nil, err
}
// Move all the mounts into place
err = moveMounts(append(mounts, m...))
if err != nil {
return nil, err
}
// Populate /dev directory instead of bind mounting it from the host
err = populateDev()
if err != nil {
return nil, fmt.Errorf("Failed to populate /dev: %w", err)
}
// Change permission for /dev/shm
err = unix.Chmod("/dev/shm", 01777)
if err != nil {
return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err)
}
var env Environment
envs := definition.Environment
if envs.ClearDefaults {
env = Environment{}
} else {
env = Environment{
"PATH": EnvVariable{
Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin",
Set: true,
},
"SHELL": EnvVariable{
Value: "/bin/sh",
Set: true,
},
"TERM": EnvVariable{
Value: "xterm",
Set: true,
},
"DEBIAN_FRONTEND": EnvVariable{
Value: "noninteractive",
Set: true,
},
}
}
if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {
imageTargets := ImageTargetUndefined | ImageTargetAll
if definition.Targets.Type == DefinitionFilterTypeContainer {
imageTargets |= ImageTargetContainer
} else if definition.Targets.Type == DefinitionFilterTypeVM {
imageTargets |= ImageTargetVM
}
for _, e := range envs.EnvVariables {
if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {
continue
}
entry, ok := env[e.Key]
if ok {
entry.Value = e.Value
entry.Set = true
} else {
env[e.Key] = EnvVariable{
Value: e.Value,
Set: true,
}
}
}
}
// Set environment variables
oldEnv := SetEnvVariables(env)
// Setup policy-rc.d override
policyCleanup := false
if lxd.PathExists("/usr/sbin/") && !lxd.PathExists("/usr/sbin/policy-rc.d") {
err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh
exit 101
`), 0755)
if err != nil {
return nil, err
}
policyCleanup = true
}
exitFunc := func() error {
defer root.Close()
// Cleanup policy-rc.d
if policyCleanup {
err = os.Remove("/usr/sbin/policy-rc.d")
if err != nil {
return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err)
}
}
// Reset old environment variables
SetEnvVariables(oldEnv)
// Switch back to the host rootfs
err = root.Chdir()
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
err = unix.Chroot(".")
if err != nil {
return fmt.Errorf("Failed to chroot: %w", err)
}
err = unix.Chdir(cwd)
if err != nil {
return fmt.Errorf("Failed to chdir: %w", err)
}
// This will kill all processes in the chroot and allow to cleanly
// unmount everything.
err = killChrootProcesses(rootfs)
if err != nil {
return fmt.Errorf("Failed killing chroot processes: %w", err)
}
// And now unmount the entire tree
err = unix.Unmount(rootfs, unix.MNT_DETACH)
if err != nil {
return fmt.Errorf("Failed unmounting rootfs: %w", err)
}
devPath := filepath.Join(rootfs, "dev")
// Wipe $rootfs/dev
err := os.RemoveAll(devPath)
if err != nil {
return fmt.Errorf("Failed to remove directory %q: %w", devPath, err)
}
ActiveChroots[rootfs] = nil
return os.MkdirAll(devPath, 0755)
}
ActiveChroots[rootfs] = exitFunc
return exitFunc, nil
}
func populateDev() error {
devs := []struct {
Path string
Major uint32
Minor uint32
Mode uint32
}{
{"/dev/console", 5, 1, unix.S_IFCHR | 0640},
{"/dev/full", 1, 7, unix.S_IFCHR | 0666},
{"/dev/null", 1, 3, unix.S_IFCHR | 0666},
{"/dev/random", 1, 8, unix.S_IFCHR | 0666},
{"/dev/tty", 5, 0, unix.S_IFCHR | 0666},
{"/dev/urandom", 1, 9, unix.S_IFCHR | 0666},
{"/dev/zero", 1, 5, unix.S_IFCHR | 0666},
}
for _, d := range devs {
if lxd.PathExists(d.Path) {
continue
}
dev := unix.Mkdev(d.Major, d.Minor)
err := unix.Mknod(d.Path, d.Mode, int(dev))
if err != nil {
return fmt.Errorf("Failed to create %q: %w", d.Path, err)
}
// For some odd reason, unix.Mknod will not set the mode correctly.
// This fixes that.
err = unix.Chmod(d.Path, d.Mode)
if err != nil | {
return fmt.Errorf("Failed to chmod %q: %w", d.Path, err)
} | conditional_block |
|
main.rs | : QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
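// Number of answers rendered per page; page N starts at offset N * ANSWER_COUNT_PER_PAGE.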
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template |
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/<question_id>/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /admin/question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web:: | {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
} | identifier_body |
main.rs | /* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/<question_id>/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /admin/question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
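    // Persist the answer, then push it onto the tweet_sender channel so it can be tweeted asynchronously.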
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
i | mpl<'r> resp | identifier_name |
|
main.rs | : QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => | ,
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/<question_id>/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
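    // Return the answer together with its chronological neighbours so the client can render prev/next links.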
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /admin/question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web:: | {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
} | conditional_block |
main.rs | : QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */ | #[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q| !q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard:: | random_line_split |
|
lib.rs | //! ```
//!
//! To get started using Crabsformer, read the quickstart tutorial below.
//!
//! # Quickstart Tutorial
//!
//! ## Prerequisites
//! Before reading this quick tutorial you should know a bit of Rust. If you
//! would like to refresh your memory, take a look at the [Rust book].
//!
//! [Rust book]: https://doc.rust-lang.org/book/
//!
//! ## The Basics
//! There are two main data structures in Crabsformer:
//!
//! 1. [`Vector<T>`] is a fixed-length list of elements of the same
//! [numeric type]. It has one attribute called [`len`] to represent the
//! total number of elements.
//! 2. [`Matrix<T>`] is a table of elements of the same [numeric type]. It has
//! one attribute called [`shape`] that represents the number of rows and
//! the number of columns.
//!
//! `Vector<T>` is pronounced as 'numeric vector' to avoid confusion with
//! Rust's vector [`Vec<T>`] data structure.
//!
//! [`Vector<T>`]: vector/struct.Vector.html
//! [`Matrix<T>`]: matrix/struct.Matrix.html
//! [`len`]: vector/struct.Vector.html#method.len
//! [`shape`]: matrix/struct.Matrix.html#method.shape
//! [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
//!
//! ### Numeric Vector Builders
//! There are several ways to create numeric vector.
//!
//! For example, you can create a numeric vector from a Rust vector using
//! `Vector::from` static method. The type of the resulting numeric vector is
//! deduced from the type of the elements in the sequences.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vec![3, 1, 4, 1, 5];
//! let y = Vector::from(x);
//! ```
//!
//! The [`vector!`] macro is provided to make initialization of the numeric
//! vector more convenient.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![1, 10, 11, 314];
//! ```
//!
//! It can also initialize each element of a numeric vector with a given value.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![0; 5]; // vector![0, 0, 0, 0, 0]
//! ```
//!
//! To create a numeric vector of evenly spaced values, Crabsformer provides the
//! [`Vector::range`] function.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = Vector::range(0, 10, 1).unwrap();
//! assert_eq!(x, vector![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
//! ```
//!
//! To create random numeric vectors, Crabsformer provides
//! [`RandomVectorBuilder`]. It can be explicitly seeded to make the results
//! reproducible.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut rvb = RandomVectorBuilder::new();
//! ```
//!
//! The method [`rvb.uniform`] creates a numeric vector of the given length
//! and populates it with random samples from a uniform distribution over the
//! half-open interval.
//!
//! ```
//! # use crabsformer::prelude::*;
//! # let mut rvb = RandomVectorBuilder::new();
//! let v = rvb.uniform(5, 0.0, 1.0).unwrap();
//! // Random
//! // [0.054709196, 0.86043775, 0.21187294, 0.6413728, 0.14186311]
//! ```
//!
//! See also: [Numeric Vector Builders].
//!
//! [`vector!`]: macro.vector.html
//! [`RandomVectorBuilder`]: vector/builders/struct.RandomVectorBuilder.html
//! [`rvb.uniform`]: vector/builders/struct.RandomVectorBuilder.html#method.uniform
//! [Numeric Vector Builders]: vector/builders/index.html
//! [`Vector::range`]: vector/struct.Vector.html#method.range
//!
//! ### Numeric Vector Basic Operations
//! You can perform arithmetic operations on a numeric vector. Arithmetic
//! operators on numeric vectors apply elementwise. A new numeric vector is
//! created and filled with the result.
//!
//! For example, if you add two numeric vectors, the arithmetic operator
//! works element-wise. The output is a numeric vector of the same
//! length.
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![2, 4, 6] + vector![1, 3, 5];
//! assert_eq!(x, vector![3, 7, 11]);
//! ```
//!
//! Numeric vector subtraction and multiplication also work the same way:
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 5] - vector![1, 3, 5];
//! assert_eq!(x, vector![2, -2, 0]);
//!
//! let y = vector![5, 4, 1] * vector![2, 1, 4];
//! assert_eq!(y, vector![10, 4, 4]);
//! ```
//!
//! You can run an arithmetic operation on the numeric vector with a scalar
//! value too. For example, this code multiplies each element of the numeric
//! vector by 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4] * 2;
//! assert_eq!(x, vector![6, 2, 8]);
//! ```
//!
//! Some operations, such as `+=` and `*=`, act in place to modify an
//! existing numeric vector rather than create a new one.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut x = vector![3, 1, 4];
//!
//! x += 3;
//! assert_eq!(x, vector![6, 4, 7]);
//!
//! x -= 1;
//! assert_eq!(x, vector![5, 3, 6]);
//!
//! x *= 2;
//! assert_eq!(x, vector![10, 6, 12]);
//! ```
//!
//! If you try to add, subtract or multiply numeric vectors with a different
//! number of elements, you will get an error. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1, 5] + vector![2, 10, 9];
//! // thread 'main' panicked at 'Vector addition with invalid length: 5 != 3' src/main.rs:12:13
//! ```
//!
//! *TODO: add alternative x.add() to return Result instead of panics*
//!
//! If you would like to square the individual elements of the numeric
//! vector, or raise them to an even higher power, use the [`power`] method. Here, each element of the
//! numeric vector is raised to the power 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//! let y = x.power(2);
//! assert_eq!(y, vector![9, 1, 16, 1]);
//! ```
//!
//! [`power`]: struct.Vector.html#method.power
//!
//! When operating with numeric vectors of different types,
//! the Rust compiler will raise an error like the following:
//!
//! ```text
//! cannot add `vector::Vector<{integer}>` to `vector::Vector<{float}>`
//! ```
//!
//! Many unary operations, such as computing the sum of all the elements in the
//! numeric vector, are implemented as methods.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4];
//! let sum = x.sum();
//! assert_eq!(sum, 8);
//! assert_eq!(*x.max(), 4);
//! assert_eq!(*x.min(), 1);
//! ```
//!
//! See also: [`power`], [`filter`], [`sum`], [`max`], [`min`].
//!
//! [`power`]: struct.Vector.html#method.power
//! [`filter`]: struct.Vector.html#method.filter
//! [`sum`]: struct.Vector.html#method.sum
//! [`max`]: struct.Vector.html#method.max
//! [`min`]: struct.Vector.html#method.min
//!
//! ### Indexing, Slicing and Iterating Numeric Vector
//! Numeric vectors can be indexed, sliced and iterated over, much like
//! Rust's vector.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//!
//! // Indexing numeric vector
//! assert_eq!(x[0], 3);
//! assert_eq!(x[2], 4);
//!
//! // Slicing numeric vector
//! x.slice(0..2); // [3, 1]
//! x.slice(2..); // [4, 1]
//! x.slice(..2); // [3, | //!
//! and this to your crate root:
//!
//! ```
//! use crabsformer::prelude::*; | random_line_split |
|
2.1.dl_tf_intermediate_classifications.py | , may not be useful for current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, we do a simple thing so that we can focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segregate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore a few more ways to improve classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing values is the purview of ML. Here, we do a simple thing so that we can focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identification of features and response. Details will be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segregate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: the regressor accepts Tensors, so we use a custom function to convert the pandas
#DataFrame and return feature column and label values as Tensors:
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
|
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#train in iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#bucketized cols: If you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: When there are large values per category then use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
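# Illustrative sketch only (assumption, not part of the original exercise): a hashed feature column
# maps category strings into a fixed number of buckets by hashing, instead of using an explicit
# vocabulary list. The bucket size below is an arbitrary choice for demonstration.
#cat_hashed = tf.feature_column.categorical_column_with_hash_bucket(categorical_FEATURES, hash_bucket_size=100)
#feature_cols.append(tf.feature_column.indicator_column(cat_hashed))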
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique | dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels)) | conditional_block |
2.1.dl_tf_intermediate_classifications.py | , may not be useful for current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: regressor accepts Tensors and custom function to convert pandas
#Dataframe and return feature column and label values as Tensors:
def | (features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#train in iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#bucketized cols: If you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: When there are large values per category then use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd | input_fn | identifier_name |
2.1.dl_tf_intermediate_classifications.py | , may not be useful for current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
| training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: regressor accepts Tensors and custom function to convert pandas
#Dataframe and return feature column and label values as Tensors:
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset
#train in iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#bucketized cols: If you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: When there are large values per category then use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd | random_line_split |
|
2.1.dl_tf_intermediate_classifications.py | , may not be useful for current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
data.dtypes
#One hot encode
data = Encoding(data, LABEL, scale_and_center = True, fileTrain = "./data/kaggle_titanic_train_EncodedScaled.csv")
data.head(2)
#Get list of independent features
ar_independent_features = np.setdiff1d(data.columns, LABEL)
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15)
del(data)
training_set.shape
training_set.head(2)
len_fea = len(ar_independent_features)
# Build the model
model = tf.keras.models.Sequential() # same as tf.keras.Sequential()
model.add(tf.keras.layers.Dense(2*len_fea, input_shape=(len_fea,), activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(len_fea, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(2, activation=tf.nn.softmax))
model.summary()
#Compile
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train it
model.fit(training_set[ar_independent_features].values, training_set[LABEL].values, epochs=100, batch_size=batch_size) # 6 min
#Save and retrieve
model.save('./model/model_tf_kaggle_titanic_binary_classsification.h5')
#model = tf.keras.models.load_model('./model/model_tf_kaggle_titanic_binary_classsification.h5')
# Evaluate on test data
model.evaluate(test_set[ar_independent_features].values, test_set[LABEL].values, verbose = 0)
# loss value & metrics values: [0.45, 0.79]
#Making Predictions
predictions = model.predict(x=test_set[ar_independent_features].values)
# Extracting max probability
predictions_number = np.array([])
for row_num in range(predictions.shape[0]): # row_num = 0
predictions_number = np.append(predictions_number, np.argmax(predictions[row_num]))
#Few statistics
confusion_matrix(test_set[LABEL].values, predictions_number)
classification_report(test_set[LABEL].values, predictions_number)
#Statistics are also available as follows
print("Overall Accuracy is ", round(accuracy_score(test_set[LABEL].values, predictions_number), 2),", Kappa is ", round(abs(cohen_kappa_score(test_set[LABEL].values, predictions_number)), 2))
#Overall Accuracy is 0.81 , Kappa is 0.56
del(training_set, test_set, predictions_number); gc.collect()
#%% Binary classification: Explore few more ways to better classification
# Restart the Spyder
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, cohen_kappa_score, confusion_matrix, classification_report
import gc; gc.enable()
tf.keras.backend.clear_session() # For easy reset of notebook state
#Set PANDAS to show all columns in DataFrame
pd.set_option('display.max_columns', None)
#Set PANDAS to show all rows in DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('precision', 2)
os.chdir("D:\\trainings\\tensorflow")
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# fix random seed for reproducibility
seed = 123; np.random.seed(seed); tf.compat.v1.set_random_seed(seed)
# Read data
data = pd.read_csv("./data/kaggle_titanic_train.csv")
data.shape
data.dtypes
data.head(2)
data.info()
print(data.describe())
#print(data.describe(include = [np.number])) # for number only
#Drop a few columns that may not be useful for the current analysis
data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
data.info() # Now see if any missing values in any columns
#Note: how to impute missing value is purview of ML. Here, do simple thing so that we focus on DL
age_avg = data['Age'].mean()
data['Age'].fillna(value = age_avg, inplace=True)
#Now, drop rows if any missing
data.dropna(inplace=True)
data.info() # Now see if any missing values in any columns
# identifications of features and response. Detail'll be explained in a few minutes
NUM_FEATURES = ['Pclass','SibSp','Parch','Fare']
bucketized_FEATURES = 'Age'
categorical_FEATURES = 'Sex'
embedding_FEATURES = 'Embarked'
crossed_FEATURES = 'Embarked' # With Age
FEATURES = np.append(np.append(np.append(np.append(NUM_FEATURES, bucketized_FEATURES), categorical_FEATURES), embedding_FEATURES), crossed_FEATURES)
FEATURES = np.unique(FEATURES)
LABEL = "Survived"
batch_size = 8
#Do the data type conversion for category
data[[categorical_FEATURES,embedding_FEATURES]] = data[[categorical_FEATURES,embedding_FEATURES]].apply(lambda x: x.astype('category'))
#Segragate 85% and 15%
training_set ,test_set = train_test_split(data,test_size=0.15, random_state = seed, stratify = data[LABEL])
#Building the input_fn: regressor accepts Tensors and custom function to convert pandas
#Dataframe and return feature column and label values as Tensors:
def input_fn(features, labels = None, custom_batch_size = batch_size, caller_source = 'train'):
# Convert the inputs to a Dataset.
|
#train in iterable dataset
ds_train = input_fn(training_set[FEATURES], training_set[LABEL],custom_batch_size = batch_size)
#Create feature columns
feature_cols = []
# numeric cols
for num_col in NUM_FEATURES:
feature_cols.append(tf.feature_column.numeric_column(num_col, dtype=tf.float32))
#bucketized cols: If you don't want to feed a number directly to the model, but instead split its value into
#different categories based on numerical ranges.
#Buckets include the left boundary, and exclude the right boundary.
bucketized_col = tf.feature_column.numeric_column(bucketized_FEATURES, dtype=tf.float32)
age_buckets = tf.feature_column.bucketized_column(bucketized_col, boundaries=[30, 40, 50, 60])
feature_cols.append(age_buckets)
# indicator cols
cat_vocab = tf.feature_column.categorical_column_with_vocabulary_list(categorical_FEATURES, pd.unique(data[categorical_FEATURES].values))
cat_one_hot = tf.feature_column.indicator_column(cat_vocab)
feature_cols.append(cat_one_hot)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(cat_one_hot)
print(feature_layer(first_batch).numpy())
#Embedding cols: When there are large values per category then use an embedding column to
#overcome this limitation. Instead of representing the data as a one-hot vector of many
#dimensions, an embedding column represents that data as a lower-dimensional, dense vector in
#which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the
#example below) is a parameter that must be tuned.
embedding_col = tf.feature_column.embedding_column(cat_vocab, dimension=8) # 8 Need to be tuned
feature_cols.append(embedding_col)
# Just to see - one hot encoding
first_batch = next(iter(ds_train))[0]
feature_layer = tf.keras.layers.DenseFeatures(embedding_col)
print(feature_layer(first_batch).numpy())
#CW: Read 'Hashed feature columns' and practice above
## crossed cols TBD: Not working
#cat_vocab_crosssed = tf.feature_column.categorical_column_with_vocabulary_list(crossed_FEATURES, pd.unique | dataset = tf.data.Dataset.from_tensor_slices(dict(features))
if caller_source != 'test':
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
if caller_source == 'train':
dataset = dataset.shuffle(len(features)) #if ".repeat()" is added here then add "epochs steps_per_epoch" in fit
dataset = dataset.batch(custom_batch_size)
return dataset | identifier_body |
ivf_torch.py | elif dtype == int:
return int
elif dtype == list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
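# Illustrative note (assumption, not in the original file): the callable returned above is meant to
# act on KeOps LazyTensors, e.g.
# dist = torchtools.distance_function("euclidean")
# d_ij = dist(LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])) # symbolic (N, M) distance matrix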
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
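# Illustrative usage sketch (assumption, not part of the original file); shapes and hyper-parameters
# below are chosen only for demonstration:
# x = torch.randn(10000, 3, device="cuda")
# cl, c = torchtools.kmeans(x, K=50, Niter=15, device="cuda")
# # cl: (10000,) cluster assignment per point, c: (50, 3) cluster centroids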
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
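# Illustrative usage sketch (assumption, not part of the original file): a dense Gaussian kernel
# matrix between two small point clouds.
# x, y = torch.randn(100, 3), torch.randn(200, 3)
# K_xy = torch_kernel(x, y, s=0.5, kernel="gaussian") # shape (100, 200)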
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x | return "float64"
elif dtype == torch.float16:
return "float16" | random_line_split |
|
ivf_torch.py | list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
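# A minimal usage sketch for the K-Means helper above. It assumes the static methods
# are collected on a `torchtools` class (as the `torchtools.distance_function` call
# suggests) and that pykeops is installed; names and sizes are illustrative only.
def _kmeans_usage_sketch():
    import torch
    x = torch.randn(1000, 3)  # 1000 random points in 3D
    # cl holds one cluster label per point, c the K centroid coordinates
    cl, c = torchtools.kmeans(x, K=5, Niter=10, device="cpu", approx=False)
    return cl, c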
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
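# A small sketch of torch_kernel with plain PyTorch tensors: it evaluates the chosen
# radial kernel on the pairwise squared distances computed by squared_distances.
def _torch_kernel_usage_sketch():
    import torch
    x = torch.randn(5, 3)
    y = torch.randn(4, 3)
    K = torch_kernel(x, y, s=1.0, kernel="gaussian")  # (5, 4) Gaussian kernel matrix
    return K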
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
|
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
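# The sort/unsort pair above uses a standard permutation trick: points are sorted by
# cluster label with a permutation perm, and results are mapped back with perm.argsort().
# A tiny self-contained check of that identity in plain PyTorch (illustrative only):
def _permutation_roundtrip_sketch():
    import torch
    lab = torch.tensor([2, 0, 1, 0])
    lab_sorted, perm = torch.sort(lab)     # perm maps sorted positions to original ones
    restored = lab_sorted[perm.argsort()]  # argsort of perm is its inverse permutation
    assert torch.equal(restored, lab)
    return perm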
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools.norm | d.backend = self.__backend | conditional_block |
ivf_torch.py | list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
|
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def __get_tools(self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools | "Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach() | identifier_body |
ivf_torch.py | list:
return "float32"
else:
raise ValueError(
"[KeOps] {} data type incompatible with KeOps.".format(dtype)
)
@staticmethod
def rand(m, n, dtype=default_dtype, device="cpu"):
return torch.rand(m, n, dtype=dtype, device=device)
@staticmethod
def randn(m, n, dtype=default_dtype, device="cpu"):
return torch.randn(m, n, dtype=dtype, device=device)
@staticmethod
def zeros(shape, dtype=default_dtype, device="cpu"):
return torch.zeros(shape, dtype=dtype, device=device)
@staticmethod
def eye(n, dtype=default_dtype, device="cpu"):
return torch.eye(n, dtype=dtype, device=device)
@staticmethod
def array(x, dtype=default_dtype, device="cpu"):
if dtype == "float32":
dtype = torch.float32
elif dtype == "float64":
dtype = torch.float64
elif dtype == "float16":
dtype = torch.float16
else:
raise ValueError("[KeOps] data type incompatible with KeOps.")
return torch.tensor(x, dtype=dtype, device=device)
@staticmethod
def device(x):
if isinstance(x, torch.Tensor):
return x.device
else:
return None
@staticmethod
def distance_function(metric):
def euclidean(x,y):
return ((x-y) ** 2).sum(-1)
def manhattan(x,y):
return ((x-y).abs()).sum(-1)
def angular(x,y):
return -(x | y)
def angular_full(x,y):
return angular(x,y)/((angular(x,x)*angular(y,y)).sqrt())
def hyperbolic(x,y):
return ((x - y) ** 2).sum(-1) / (x[0] * y[0])
if metric=='euclidean':
return euclidean
elif metric=='manhattan':
return manhattan
elif metric=='angular':
return angular
elif metric=='angular_full':
return angular_full
elif metric=='hyperbolic':
return hyperbolic
else:
raise ValueError('Unknown metric')
@staticmethod
def sort(x):
return torch.sort(x)
@staticmethod
def unsqueeze(x,n):
return torch.unsqueeze(x,n)
@staticmethod
def arange(n,device="cpu"):
return torch.arange(n,device=device)
@staticmethod
def repeat(x,n):
return torch.repeat_interleave(x,n)
@staticmethod
def to(x,device):
return x.to(device)
@staticmethod
def index_select(input,dim,index):
return torch.index_select(input,dim,index)
@staticmethod
def norm(x,p=2,dim=-1):
return torch.norm(x,p=p,dim=dim)
@staticmethod
def kmeans(x, distance=None, K=10, Niter=10, device="cuda", approx=False, n=10):
from pykeops.torch import LazyTensor
if distance is None:
distance = torchtools.distance_function("euclidean")
def calc_centroid(x, c, cl, n=10):
"Helper function to optimise centroid location"
c = torch.clone(c.detach()).to(device)
c.requires_grad = True
x1 = LazyTensor(x.unsqueeze(0))
op = torch.optim.Adam([c], lr=1 / n)
scaling = 1 / torch.gather(torch.bincount(cl), 0, cl).view(-1, 1)
scaling.requires_grad = False
with torch.autograd.set_detect_anomaly(True):
for _ in range(n):
c.requires_grad = True
op.zero_grad()
c1 = LazyTensor(torch.index_select(c, 0, cl).unsqueeze(0))
d = distance(x1, c1)
loss = (
d.sum(0) * scaling
).sum() # calculate distance to centroid for each datapoint, divide by total number of points in that cluster, and sum
loss.backward(retain_graph=False)
op.step()
return c.detach()
N, D = x.shape
c = x[:K, :].clone()
x_i = LazyTensor(x.view(N, 1, D).to(device))
for i in range(Niter):
c_j = LazyTensor(c.view(1, K, D).to(device))
D_ij = distance(x_i, c_j)
cl = D_ij.argmin(dim=1).long().view(-1)
# updating c: either with approximation or exact
if approx:
# approximate with GD optimisation
c = calc_centroid(x, c, cl, n)
else:
# exact from average
c.zero_()
c.scatter_add_(0, cl[:, None].repeat(1, D), x)
Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1)
c /= Ncl
if torch.any(torch.isnan(c)):
raise ValueError(
"NaN detected in centroids during KMeans, please check metric is correct"
)
return cl, c
def squared_distances(x, y):
x_norm = (x ** 2).sum(1).reshape(-1, 1)
y_norm = (y ** 2).sum(1).reshape(1, -1)
dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1))
return dist
def torch_kernel(x, y, s, kernel):
sq = squared_distances(x, y)
_kernel = {
"gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)),
"laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s),
"cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)),
"inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)),
}
return _kernel[kernel](sq, s)
class GenericIVF:
"""Abstract class to compute IVF functions
End-users should use 'pykeops.numpy.ivf' or 'pykeops.torch.ivf'
"""
def __init__(self, k, metric, normalise, LazyTensor):
self.__k = k
self.__normalise = normalise
self.__update_metric(metric)
self.__LazyTensor = LazyTensor
self.__c = None
def __update_metric(self, metric):
if isinstance(metric, str):
self.__distance = self.tools.distance_function(metric)
self.__metric = metric
elif callable(metric):
self.__distance = metric
self.__metric = "custom"
else:
raise ValueError("Unrecognised metric input type")
@property
def metric(self):
"""Returns the metric used in the search"""
return self.__metric
@property
def c(self):
"""Returns the clusters obtained through K-Means"""
if self.__c is not None:
return self.__c
else:
raise ValueError("Run .fit() first!")
def | (self):
pass
def __k_argmin(self, x, y, k=1):
x_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(x, 1), self.__device)
)
y_LT = self.__LazyTensor(
self.tools.to(self.tools.unsqueeze(y, 0), self.__device)
)
d = self.__distance(x_LT, y_LT)
if not self.tools.is_tensor(x):
if self.__backend:
d.backend = self.__backend
if k == 1:
return self.tools.view(self.tools.long(d.argmin(dim=1)), -1)
else:
return self.tools.long(d.argKmin(K=k, dim=1))
def __sort_clusters(self, x, lab, store_x=True):
lab, perm = self.tools.sort(self.tools.view(lab, -1))
if store_x:
self.__x_perm = perm
else:
self.__y_perm = perm
return x[perm], lab
def __unsort(self, nn):
return self.tools.index_select(self.__x_perm[nn], 0, self.__y_perm.argsort())
def _fit(
self,
x,
clusters=50,
a=5,
Niter=15,
device=None,
backend=None,
approx=False,
n=50,
):
"""
Fits the main dataset
"""
if type(clusters) != int:
raise ValueError("Clusters must be an integer")
if clusters >= len(x):
raise ValueError("Number of clusters must be less than length of dataset")
if type(a) != int:
raise ValueError("Number of clusters to search over must be an integer")
if a > clusters:
raise ValueError(
"Number of clusters to search over must be less than total number of clusters"
)
if len(x.shape) != 2:
raise ValueError("Input must be a 2D array")
if self.__normalise:
x = x / self.tools.repeat(self.tools | __get_tools | identifier_name |
views.py | ,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding post in the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating the event in the database using the event_id given in the URL
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if the location already exists
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exists using the Google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if it exists, save the city in the database
obj= form.save(commit=False)
obj.user = user
obj.save()
#should be plain params, not paramsWeather, because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location coordinates
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if information is available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax request with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without a form submission
#get all weather locations saved by the user
cities = Locations.objects.filter(user=user)
#create an empty array to store the weather data for each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lng are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
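# Compact sketch of the two-step lookup used in weatherView above: geocode the city
# name with the Google API, then fetch the forecast for the returned coordinates from
# OpenWeatherMap. The key arguments are placeholders, not working credentials.
def _weather_lookup_sketch(city, google_key, owm_key):
    import requests
    geo = requests.get(
        "https://maps.googleapis.com/maps/api/geocode/json",
        params={"key": google_key, "address": city},
    ).json()
    if geo["status"] != "OK":
        return None
    loc = geo["results"][0]["geometry"]["location"]
    return requests.get(
        "http://api.openweathermap.org/data/2.5/onecall",
        params={"lat": loc["lat"], "lon": loc["lng"], "units": "metric",
                "exclude": "hourly,minutely", "appid": owm_key},
    ).json()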
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST, validate and save
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 members per event
if member.count() <= 9:
#save member
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceeded!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get the member using the member_id in the URL
member = EventMember.objects.get(id= member_id)
#delete from database
member.delete()
#return successful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delete, same process as delete member
@login_required
def | note_delete | identifier_name |
|
views.py |
else:
raise Http404('Wrong credentials')
# If logged in, session variables are cleaned up and user logged out. Otherwise redirected to login page
@login_required
def logoutView(request):
logout(request)
#registration
def signUpView(request):
#checking if methos is POST
if request.method == "POST":
#getting from from request
form = signUpForm(request.POST)
#validateing from
if form.is_valid():
#if valid, save and redirect to login with a message
form.save()
messages.success(request,"Registration Successful!")
return redirect("/login")
else:
#error
print('failed after validation')
else:
#clean up form
form = signUpForm()
return render(request, "signup.html", {"form": form})
#view to updated account info
@login_required
def profileView(request):
#checking request methos
if request.method == 'POST':
#extracting form information from the request and storing it in local variables
user = CustomUser.objects.get(username= request.user)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
#updating the existing values with the new ones
user.first_name= first_name
user.last_name = last_name
user.email=email
#save and redirect to same page
user.save()
return redirect("manCal:profile")
context = {
}
return render(request, "profile.html", context)
# start calendar render views
#get date for starting calendar date
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return date(year, month, day=1)
return datetime.today()
#action to go prev month
def prev_month(d):
#changing the day with which the calendar is started
first = d.replace(day=1)
prev_month = first - timedelta(days=1)
#converting and formatting data for HTML
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
##same as prev_month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
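# Quick illustration of the month-navigation helpers above: given a date they build
# the ?month=YYYY-M query fragment that the calendar view reads back via get_date.
def _month_nav_sketch():
    from datetime import date
    d = date(2021, 1, 15)
    return prev_month(d), next_month(d)  # ('month=2020-12', 'month=2021-2')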
#calendar generic list view
class CalendarView(LoginRequiredMixin, generic.ListView):
model = Event
#template to render
template_name = 'calendar.html'
#setting up context data
def get_context_data(self, **kwargs):
#superclass call
context = super().get_context_data(**kwargs)
#getting date for calendar start
d = get_date(self.request.GET.get('month', None))
user = CustomUser.objects.get(username= self.request.user)
#passing initialization variables to the calendar
cal = Calendar(d.year, d.month, user)
html_cal = cal.formatmonth(withyear=True)
#getting user notes
notes = Notes.objects.filter(user=user)
#defining new context data
context['calendar'] = mark_safe(html_cal)
context['prev_month'] = prev_month(d)
context['next_month'] = next_month(d)
context['notes'] = notes
context['user']= user
return context
#create events
@login_required
def create_event(request):
form = EventForm(request.POST or None)
#checking if the request type is post and if the form is valid
if request.POST and form.is_valid():
#getting specific inputs from the Django form and storing them in separate variables
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete vie for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding post in the confirmation form to provide a cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating the event in the database using the event_id given in the URL
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was successful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaining latitude and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#passing retrieved data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#check if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if location already exist
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validating form
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exist useing google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if exist save city in database
obj= form.save(commit=False)
obj.user = user
obj.save()
#should be plain params, not paramsWeather, because we are using the Google API here
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location cord
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if | request.session['username'] = username
request.session['password'] = password
context = {
'username': username,
'password': password,
'loggedin': True
}
response = render(request, 'index.html', context)
# Remember last login in cookie
now = D.datetime.utcnow()
max_age = 365 * 24 * 60 * 60 #one year
delta = now + D.timedelta(seconds=max_age)
format = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = D.datetime.strftime(delta, format)
response.set_cookie('last_login',now,expires=expires)
#return response
return redirect("/index") | conditional_block |
|
views.py | .com/maps/api/geocode/json?'
#API response conteining geo-cordinates
response = requests.get(base_url, params=params).json()
#checking if the request was succesful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaing latiture and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#pasing retrived data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#chekc if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if location already exist
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validateing from
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exist useing google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if exist save city in database
obj= form.save(commit=False)
obj.user = user
obj.save()
#should be simple params not weather becasue we are useing Google API
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location cord
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if infomation available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax reqeust with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without from submittion
#get all weather location saved by the user
cities = Locations.objects.filter(user=user)
#create empty arrasy to store all weather data about each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lgn are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST validate and sabe
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 member for event
if member.count() <= 9:
#save meber
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceed!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member useing the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete form database
member.delete()
#return succesfful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#delete location, same process as delete member
@login_required
def location_delete(request, location_id):
location = Locations.objects.get(id = location_id)
location.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#note delte same process as delete member
@login_required
def note_delete(request, note_id):
note= Notes.objects.get(id= note_id)
note.delete()
return JsonResponse({'result' : 'ok'}, status=200)
#add file for event view
@login_required
def add_files(request):
#getting the event to which we want to add file
event_id = request.POST.get('event_id')
event = Event.objects.get(id=event_id)
#list of the files to upload; this is a list because the HTML form lets the user select multiple files
files = request.FILES.getlist('files')
#looping through all selected files
for file in files:
fs= FileSystemStorage()
#saving the file and getting the path to it
file_path = fs.save(file.name, file)
#creating new EventFiles object
sfile= EventFiles(event = event, files = file_path)
#saving the object
sfile.save()
return redirect('manCal:event-detail', event_id = event_id,)
#create note
@login_required
def add_note(request):
#getting the user and the content of the note
if request.method == 'POST':
user = CustomUser.objects.get(username= request.user)
note = request.POST.get('note')
#creating new note
new_note = Notes.objects.create(
user = user,
note = note
)
#returning the created object to the Ajax request, converting the model data to a dictionary
return JsonResponse({'note' : model_to_dict(new_note)}, status=200)
#update note status
@login_required
def note_complited(request, note_id):
#getting note from note id
note = Notes.objects.get(id=note_id)
#changing note status
if note.complited == True: | note.complited = False
elif note.complited == False:
note.complited = True
#saveing new status | random_line_split |
|
views.py | variable
title = form.cleaned_data['title']
description = form.cleaned_data['description']
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
location = form.cleaned_data['location']
#creating new event object
Event.objects.get_or_create(
user=request.user,
title=title,
description=description,
start_time=start_time,
end_time=end_time,
location= location
)
return HttpResponseRedirect(reverse('manCal:calendar'))
return render(request, 'event.html', {'form': form})
#generic update view for event edit
class EventEdit(LoginRequiredMixin, generic.UpdateView):
#In which model the data are stored
model = Event
#fields to update
fields = ['title', 'description', 'start_time', 'end_time', 'location']
#template to use to get data
template_name = 'event.html'
#generic delete view for event delete
class EventDelete(LoginRequiredMixin, generic.DeleteView):
model = Event
template_name = 'event_delete.html'
success_url = reverse_lazy('manCal:calendar')
#overriding data in confermation form to provide cancel button
def post(self, request, *args, **kwargs):
if "cancel" in request.POST:
return redirect('manCal:calendar')
else:
return super(EventDelete, self).post(request, *args, **kwargs)
#event details view
@login_required
def event_details(request, event_id):
#locating event in database useing the event_id given in the url
event = Event.objects.get(id=event_id)
#getting members and files attached to the event
eventmember = EventMember.objects.filter(event=event)
eventfiles = EventFiles.objects.filter(event=event)
#defining variables for API call
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
address = event.location
params = {
'key' : API_KEY,
'address': address
}
lat = 51.509865
lon = -0.118092
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#API response containing geo-coordinates
response = requests.get(base_url, params=params).json()
#checking if the request was succesful
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#obtaing latiture and longitude
lat = geometry['location']['lat']
lon = geometry['location']['lng']
context = {
#pasing retrived data to the template
'event': event,
'eventmember': eventmember,
'eventfiles': eventfiles,
'lat' : lat,
'lon' : lon,
}
return render(request, 'event-details.html', context)
#weather view
@login_required
def weatherView(request):
#API variable for weather API
url = 'http://api.openweathermap.org/data/2.5/onecall?lat={lat}&exclude=hourly,minutely&lon={lon}&units=metric&appid=dbd607d4b59f61a34125bf4f2a185f8d'
user = CustomUser.objects.get(username= request.user)
#API variable for google API
API_KEY = 'AIzaSyDio4Zj99JOhP8SBQBM3CydIsc91ld-Jbs'
base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
#chekc if the search form was submitted or the page was reloaded
if request.method == 'POST':
#if form submitted, get input from request
location = request.POST.get('location')
#check if location already exist
cityCount = Locations.objects.filter(user=user).filter(location = location).count()
form = AddLocation(request.POST)
#validateing from
if form.is_valid():
if cityCount == 0:
#if city does not exist in database
params = {
'key' : API_KEY,
'address': location
}
#check if the location exist useing google API
response_test = requests.get(base_url, params=params).json()
if response_test['status'] == 'OK':
#if exist save city in database
obj= form.save(commit=False)
obj.user = user
obj.save()
#should be simple params not weather becasue we are useing Google API
paramsWeather = {
'key' : API_KEY,
'address': obj.location
}
#getting location cord
response = requests.get(base_url, params=paramsWeather).json()
if response['status'] == 'OK':
#if infomation available
geometry = response['results'][0]['geometry']
lat = geometry['location']['lat']
lon = geometry['location']['lng']
#send request for weather information
r = requests.get(url.format(lat=lat, lon=lon)).json()
#adding info in dictionary
city_weather = {
'location_id' : obj.id,
'city' : obj.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#return dictionary to Ajax reqeust with JsonResponse
return JsonResponse({'city_weather' : city_weather, 'errorCode' : "200"}, status= 200)
else:
return JsonResponse({'error' : "Location not found", 'errorCode' : "500"}, status= 200)
elif cityCount > 0:
return JsonResponse({'error' : "Location already added", 'errorCode' : "500"}, status= 200)
return JsonResponse({'error' : "Invalid input", 'errorCode' : "500"}, status= 200)
form = AddLocation()
#if the page was loaded without from submittion
#get all weather location saved by the user
cities = Locations.objects.filter(user=user)
#create empty arrasy to store all weather data about each city
weather_data = []
#do the same thing as we did when a city was added for each city in the database
for city in cities:
params = {
'key' : API_KEY,
'address': city.location
}
response = requests.get(base_url, params=params).json()
if response['status'] == 'OK':
geometry = response['results'][0]['geometry']
#check if lat and lgn are obtained correctly
lat = geometry['location']['lat']
lon = geometry['location']['lng']
r = requests.get(url.format(lat=lat, lon=lon)).json()
city_weather = {
'location_id' : city.id,
'city' : city.location,
'temperature' : round(r['current']['temp']),
'main' : r['daily'][0]['weather'][0]['main'],
'icon' : r['daily'][0]['weather'][0]['icon'],
'tempMax' : round(r['daily'][0]['temp']['max']),
'tempMin' : round(r['daily'][0]['temp']['min']),
}
#append the data for the city to weather_data before passing to the next city
weather_data.append(city_weather)
context = {
'form' : form,
'weather_data' : weather_data,
}
return render(request, 'weather.html', context)
#add a member for an event
@login_required
def add_eventmember(request, event_id):
forms = AddMemberForm()
#check request method
if request.method == 'POST':
#if POST validate and sabe
forms = AddMemberForm(request.POST)
if forms.is_valid():
member = EventMember.objects.filter(event=event_id)
event = Event.objects.get(id=event_id)
#maximum 9 member for event
if member.count() <= 9:
#save meber
user = forms.cleaned_data['user']
EventMember.objects.create(
event=event,
user=user
)
return redirect('manCal:event-detail', event_id = event.id,)
else:
print('--------------User limit exceed!-----------------')
context = {
'form': forms
}
return render(request, 'add_member.html', context)
#delete member
@login_required
def member_delete(request, member_id):
#get member useing the member_id in the url
member = EventMember.objects.get(id= member_id)
#delete form database
member.delete()
#return succesfful response to Ajax request
return JsonResponse({'result' : 'ok'}, status=200)
#delete file, same process as delete member
@login_required
def file_delete(request, file_id):
| file = EventFiles.objects.get(id = file_id)
file.delete()
return JsonResponse({'result' : 'ok'}, status=200) | identifier_body |
|
utils.py | and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadData('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
|
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
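# A minimal sketch of driving train_with_sgd with a checkpoint callback, assuming a
# GRUTheano-style model and the X_train/y_train arrays produced above. The filename
# pattern and hyperparameters are illustrative only.
def _training_loop_sketch(model, X_train, y_train):
    def on_progress(model, epoch, num_examples_seen):
        save_model_parameters_theano(model, "GRU-checkpoint-epoch%d.npz" % epoch)
    return train_with_sgd(model, X_train, y_train, learning_rate=0.001,
                          nepoch=10, callback_every=10000, callback=on_progress)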
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to chec.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the mode, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# calculate The relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is to large fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENT | print('corpus length:', len(text)) | conditional_block |
utils.py | _to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x,y,voc,i2w,w2i = loadData('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to chec.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the mode, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# calculate The relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is to large fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE_END_TOKEN]:
#print('not finished')
next_word_probs = model.predict(new_sentence)[-1]
if sum(next_word_probs) < 1.:
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
else:
sampled_word = word_to_index[UNKNOWN_TOKEN]
if sampled_word < len(index_to_word):
new_sentence.append(sampled_word)
else:
new_sentence.append(word_to_index[UNKNOWN_TOKEN])
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
#print(new_sentence)
if len(new_sentence) > 50 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
#return None
return new_sentence
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_sentences(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_sentence(model, index_to_word, word_to_index)
print_sentence(sent, index_to_word)
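# A minimal sketch for sampling text from a saved checkpoint; the .npz path is
# hypothetical, and the vocabulary mappings must come from the same preprocessing
# run that produced the training data.
def _demo_generate(index_to_word, word_to_index, npz_path="results/demo-epoch4.npz"):
    model = load_model_parameters_theano(npz_path)
    # Prints 10 sampled sentences to stdout via print_sentence above.
    generate_sentences(model, 10, index_to_word, word_to_index)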
def saveStuff(stuff, path=None):
| """
Saves stuff to disk as pickle object
:type stuff: any type
:param stuff: data to be stored
Return: create pickle file at path
"""
if path == None:
# TODO take name from something
output = open('results/i-will-be-overwritten.pkl', 'wb')
else:
output = open(path, 'wb')
# Pickle the list using the highest protocol available.
cPickle.dump(stuff, output, -1)
output.close() | identifier_body |
|
utils.py | and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
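# A short sketch for inspecting the pairs built by the CSV loader above: for every
# sentence, y_train holds x_train shifted left by one token, so position t is trained
# to predict token t+1. Variable names follow the loader's return values.
def _demo_inspect_pair(X_train, y_train, index_to_word):
    x, y = X_train[0], y_train[0]
    print("x: %s" % " ".join(index_to_word[i] for i in x))
    print("y: %s" % " ".join(index_to_word[i] for i in y))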
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x, y, voc, w2i, i2w = loadText('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]]
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32)
return xx, yy, vocab, word_indices, indices_word
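# A minimal sketch of calling loadText and checking its outputs; uses the Gutenberg
# URL from the docstring example, so network access (or a cached file) is assumed.
def _demo_loadtext():
    xx, yy, vocab, w2i, i2w = loadText(
        'pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt", vocsize=1000)
    # yy is xx shifted by one position: next-word prediction targets.
    assert (xx[1:] == yy[:-1]).all()
    # The three special tokens are appended after the (vocsize - 3) most common words.
    assert i2w[-3:] == [UNKNOWN_TOKEN, SENTENCE_START_TOKEN, SENTENCE_END_TOKEN]
    return xx, yy, vocab, w2i, i2w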
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the model, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def | (s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENT | print_sentence | identifier_name |
utils.py | and SENTENCE_END tokens
print("Reading CSV file...")
with open(filename, 'rt') as f:
reader = csv.reader(f, skipinitialspace=True)
reader.next()
# Split full comments into sentences
sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode("utf-8").lower()) for x in reader])
# Filter sentences
sentences = [s for s in sentences if len(s) >= min_sent_characters]
sentences = [s for s in sentences if "http" not in s]
# Append SENTENCE_START and SENTENCE_END
sentences = ["%s %s %s" % (SENTENCE_START_TOKEN, x, SENTENCE_END_TOKEN) for x in sentences]
print("Parsed %d sentences." % (len(sentences)))
# Tokenize the sentences into words
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
print("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)[:vocabulary_size-2]
print("Using vocabulary size %d." % vocabulary_size)
print("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
sorted_vocab = sorted(vocab, key=operator.itemgetter(1))
index_to_word = ["<MASK/>", UNKNOWN_TOKEN] + [x[0] for x in sorted_vocab]
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
tokenized_sentences[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
# Create the training data
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]] for sent in tokenized_sentences])
return X_train, y_train, word_to_index, index_to_word
def loadText(path, origin="", vocsize=1000, maxlen=25, training_type=1, verbose=True):
"""
type(path): string
path : path of text file to save to
origin : URL where text is
vocsize : vocabulary size
maxlen : max size of one sentence
Return:
x_train, y_train, vocabulary
eg: x, y, voc, w2i, i2w = loadText('pg11.txt', origin="http://www.gutenberg.org/cache/epub/11/pg11.txt")
"""
filesource = get_file(path, origin=origin)
text = open(filesource).read()
text = SENTENCE_START_TOKEN + text + SENTENCE_END_TOKEN
if verbose:
print('corpus length:', len(text))
tokens = word_tokenize(text)
word_freq = nltk.FreqDist(tokens)
if verbose:
print("Found %d unique words tokens." % len(word_freq.items()))
vocab = word_freq.most_common(vocsize-3)
indices_word = [x[0] for x in vocab]
indices_word.append(UNKNOWN_TOKEN)
indices_word.append(SENTENCE_START_TOKEN)
indices_word.append(SENTENCE_END_TOKEN)
word_indices = dict([(w,i) for i,w in enumerate(indices_word)])
for i, word in enumerate(tokens):
tokens[i] = [word if word in word_indices else UNKNOWN_TOKEN]
# now the whole text is indices of words in the vocabulary
for i, word in enumerate(tokens):
tokens[i] = word_indices[word[0]] |
return xx, yy, vocab, word_indices, indices_word
def train_with_sgd(model, X_train, y_train, learning_rate=0.001, nepoch=40, startfrom = 0, decay=0.9,
callback_every=10000, callback=None):
for epoch in range(startfrom, nepoch):
num_examples_seen = 0
# For each training example...
for i in np.random.permutation(len(y_train)):
# One SGD step
model.sgd_step(X_train[i], y_train[i], learning_rate, decay)
num_examples_seen += 1
# Optionally do callback
if (callback and callback_every and num_examples_seen % callback_every == 0):
callback(model, epoch, num_examples_seen)
return model
def save_model_parameters_theano(model, outfile):
np.savez(outfile,
E=model.E.get_value(),
U=model.U.get_value(),
W=model.W.get_value(),
V=model.V.get_value(),
b=model.b.get_value(),
c=model.c.get_value())
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, modelClass=GRUTheano):
npzfile = np.load(path)
E, U, W, V, b, c = npzfile["E"], npzfile["U"], npzfile["W"], npzfile["V"], npzfile["b"], npzfile["c"]
hidden_dim, word_dim = E.shape[0], E.shape[1]
print "Building model from %s with word_dim=%d" % (path, word_dim)
sys.stdout.flush()
model = modelClass(word_dim, hidden_dim=hidden_dim)
model.E.set_value(E)
model.U.set_value(U)
model.W.set_value(W)
model.V.set_value(V)
model.b.set_value(b)
model.c.set_value(c)
return model
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
# List of all parameters we want to check.
model_parameters = ['E', 'U', 'W', 'b', 'V', 'c']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
# Get the actual parameter value from the model, e.g. model.W
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
# Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
# If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
def print_sentence(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
print(" ".join(sentence_str))
sys.stdout.flush()
def generate_sentence(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = [word_to_index[SENTENCE_START_TOKEN]]
# Repeat until we get an end token
while not new_sentence[-1] == word_to_index[SENTENCE |
# Create the training data
xx = np.asarray(tokens[:-1], dtype=np.int32)
yy = np.asarray(tokens[1:], dtype=np.int32) | random_line_split |
Surreal.py | #
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
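# A minimal usage sketch for grid_subsampling, assuming the cpp_subsampling extension
# is compiled and that, with both features and labels given, it returns the
# (points, features, labels) triple in that order.
def _demo_grid_subsampling():
    rng = np.random.RandomState(0)
    points = rng.rand(10000, 3).astype(np.float32)
    features = np.ones((10000, 1), dtype=np.float32)
    labels = np.zeros(10000, dtype=np.int32)
    # 5 cm voxels; each output point is the barycenter of the inputs in its voxel.
    sub_points, sub_features, sub_labels = grid_subsampling(
        points, features=features, labels=labels, sampleDl=0.05)
    return sub_points, sub_features, sub_labels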
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def | (stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further | tf_map | identifier_name |
Surreal.py | #
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
| pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on | """
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1: | identifier_body |
Surreal.py | #
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
|
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further | return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose) | conditional_block |
Surreal.py | ------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features | :param labels: optional (N,) matrix of integer labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending on the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)
elif (labels is None):
return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)
elif (features is None):
return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)
else:
return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
"""
Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, config):
Dataset.__init__(self, 'surreal')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
# self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConv code
##########################
# Parameters for the files
##########################
# Path of the folder containing files
self.dataset_name = 'surreal'
self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
self.data_folder = 'off_2/'
self.spectral_folder = 'spectral_full/'
self.txt_file = 'surreal5000_training.txt'
####################################################
####################################################
####################################################
# decide the number of shapes to keep in the training set (exp 2 setting)
self.split = config.split
self.num_train = config.num_train # -1 for all
# Number of eigenvalues kept for this model fmaps
self.neig = config.neig
self.neig_full = config.neig_full
# Number of thread for input pipeline
self.num_threads = config.input_threads
# Utility methods
# ------------------------------------------------------------------------------------------------------------------
def get_batch_gen(self, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
:param split: string in "training", "validation" or "test" (here we just keep training)
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
################
# Def generators
################
def random_balanced_gen():
print('trying to generate batch series with ', self.num_train, 'shapes')
# Initiate concatenation lists
tp_list = [] # points
tev_list = [] # eigen vectors
tevt_list = [] # transposed eigen vectors
tv_list = [] # eigen values
tevf_list = [] # full eigen vectors for ground truth maps
ti_list = [] # cloud indices
batch_n = 0
i_batch = 0
gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
# if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
# print(gen_indices.shape, config.batch_num)
# if config.split == 'test':
# print('test setting here not fully supported')
# n_shapes = self.num_test # has to be defined
# gen_indices = []
# for i in range(n_shapes - 1):
# for j in range(i + 1, n_shapes):
# gen_indices += [i, j] # put all the pairs in order
# gen_indices = np.array(gen_indices)
# Generator loop
for p_i in gen_indices:
# Get points and other input data
new_points = self.input_points[p_i]
new_evecs = self.input_evecs[p_i][:, :self.neig]
new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
new_evals = self.input_evals[p_i][:self.neig]
new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
n = new_points.shape[0]
if i_batch == config.batch_num:
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
tp_list = []
tev_list = []
tevt_list = []
tv_list = []
tevf_list = []
ti_list = []
batch_n = 0
i_batch = 0
# Add data to current batch
tp_list += [new_points]
tev_list += [new_evecs]
tevt_list += [new_evecs_trans]
tv_list += [new_evals]
tevf_list += [new_evecs_full]
ti_list += [p_i]
# Update batch size
batch_n += n
i_batch += 1
# yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
# shape matching needing pairs !!!!)
yield (np.concatenate(tp_list, axis=0),
np.concatenate(tev_list, axis=0),
np.concatenate(tevt_list, axis=1),
np.concatenate(tv_list, axis=1),
np.concatenate(tevf_list, axis=0),
np.array(ti_list, dtype=np.int32),
np.array([tp.shape[0] for tp in tp_list]))
##################
# Return generator
##################
# Generator types and shapes
gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, self.neig],
[self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
return random_balanced_gen, gen_types, gen_shapes
def get_tf_mapping(self, config):
def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
"""
From the input point cloud, this function computes all the point clouds at each conv layer, the neighbors
indices, the pooling indices and other useful variables.
:param stacked_points: Tensor with size [None, 3] where None is the total number of points
:param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
"""
# Get batch index for each point
batch_inds = self.tf_get_batch_inds(stack_lengths)
# Augment input points
stacked_points, scales, rots = self.tf_augment_input(stacked_points,
batch_inds,
config)
# First add a column of 1 as feature for the network to be able to learn 3D shapes
stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
# Then use positions or not
if config.in_features_dim == 1:
pass
elif config.in_features_dim == 3:
stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
else:
raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
# Get the whole input list
input_list = self.tf_shape_matching_inputs(config,
stacked_points,
stacked_features,
stack_lengths,
batch_inds)
# Add scale and rotation for testing
input_list += [scales, rots, obj_inds]
input_list += [stack_lengths] # in order further on | :param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number) | random_line_split |
widget.rs | ,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id != request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if !retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on new request. Returns true if the visualization query needs to
/// be updated.
fn update(&mut self, suggestion: &enso_suggestion_database::Entry, req: &Request) -> bool {
let mut visualization_modified = false;
if self.method_name != suggestion.name {
self.method_name = suggestion.name.clone();
visualization_modified = true;
}
let mut zipped_arguments = self.arguments.iter().zip(&suggestion.arguments);
if self.arguments.len() != suggestion.arguments.len()
|| !zipped_arguments.all(|(a, b)| a == &b.name)
{
self.arguments =
suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
visualization_modified = true;
}
self.node_id = req.node_id;
self.call_expression = req.call_expression;
visualization_modified
}
fn last_definitions(&self) -> Option<(NodeId, CallWidgetsConfig)> {
self.last_definitions.as_ref().map(|definitions| {
let call_id = self.call_expression;
let config = CallWidgetsConfig { call_id, definitions: definitions.clone() };
(self.node_id, config)
})
}
fn request_visualization(&mut self, manager: &Rc<Manager>, target_expression: ast::Id) {
// When visualization is requested, remove stale queried value to prevent updates while
// language server request is pending.
self.last_definitions.take();
let vis_metadata = self.visualization_metadata();
manager.request_visualization(target_expression, vis_metadata);
}
/// Generate visualization metadata for this query.
fn visualization_metadata(&self) -> Metadata {
let arguments: Vec<Code> = vec![
Self::as_unresolved_symbol(&self.method_name).into(),
Self::arg_sequence(&self.arguments).into(),
];
let preprocessor = visualization::instance::PreprocessorConfiguration {
module: WIDGET_VISUALIZATION_MODULE.into(),
method: WIDGET_VISUALIZATION_METHOD.into(),
arguments: Rc::new(arguments),
};
Metadata { preprocessor }
}
/// Escape a string to be used as a visualization argument. Transforms the string into an enso
/// expression with string literal.
fn escape_visualization_argument(arg: &str) -> String {
Ast::raw_text_literal(arg).repr()
}
/// Creates unresolved symbol via ".name" syntax. Unresolved symbol contains name and also
/// module scope to resolve it properly.
fn as_unresolved_symbol(arg: &str) -> String {
format!(".{arg}")
}
/// Escape a list of strings to be used as a visualization argument. Transforms the strings into
/// an enso expression with a list of string literals.
fn arg_sequence(args: &[ImString]) -> String {
let mut buffer = String::from("[");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buffer.push_str(", ");
}
buffer.push_str(&Self::escape_visualization_argument(arg)); | }
buffer.push(']'); | random_line_split |
|
widget.rs | , manager_notifications) = Manager::new(executed_graph.clone_ref());
let frp = Frp::new();
let model = Rc::new(RefCell::new(Model {
manager,
graph: executed_graph.clone_ref(),
widgets_of_node: default(),
widget_queries: default(),
}));
let network = &frp.network;
let input = &frp.input;
let output = &frp.private.output;
frp::extend! { network
updates_from_cache <- input.request_widgets.filter_map(
f!((definition) model.borrow_mut().request_widget(definition))
);
output.widget_data <+ updates_from_cache;
eval input.retain_node_expressions(((node_id, expr_ids)) {
model.borrow_mut().retain_node_expressions(*node_id, expr_ids)
});
eval input.remove_all_node_widgets((node_id) {
model.borrow_mut().remove_all_node_widgets(*node_id)
});
};
let out_widget_data = output.widget_data.clone_ref();
let weak = Rc::downgrade(&model);
spawn_stream_handler(weak, manager_notifications, move |notification, model| {
let data = model.borrow_mut().handle_notification(notification);
if let Some(data) = data {
out_widget_data.emit(data);
}
std::future::ready(())
});
Self { frp, model }
}
}
// =============
// === Model ===
// =============
/// Model of the Widget controller. Manages the widget queries and stores responses in a cache.
/// See [`Controller`] for more information.
#[derive(Debug)]
pub struct Model {
manager: Rc<Manager>,
graph: ExecutedGraph,
widgets_of_node: NodeToWidgetsMapping,
/// Map of queries by the target expression ID. Required to be able to map visualization update
/// responses to the corresponding widgets.
widget_queries: HashMap<ExpressionId, QueryData>,
}
impl Model {
/// Visualization update notification handler. Updates the cache and returns the widget updates
/// when the notification provides new data.
fn handle_notification(
&mut self,
notification: Notification,
) -> Option<(NodeId, CallWidgetsConfig)> {
let report_error = |message, error| {
error!("{message}: {error}");
None
};
match notification {
Notification::ValueUpdate { target, data, .. } =>
self.handle_visualization_value_update(target, data),
Notification::FailedToAttach { error, .. } =>
report_error("Failed to attach widget visualization", error),
Notification::FailedToDetach { error, .. } =>
report_error("Failed to detach widget visualization", error),
Notification::FailedToModify { error, .. } =>
report_error("Failed to modify widget visualization", error),
}
}
/// Handle visualization data update. Return widget update data.
fn handle_visualization_value_update(
&mut self,
target: ast::Id,
data: VisualizationUpdateData,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id != request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if !retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
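// A minimal lifecycle sketch (illustrative only; it assumes `NodeId` and `ExpressionId` are
// plain identifier values and that `expr_a`, `expr_b`, `node` and `manager` exist — none of
// this is taken from the sources):
//
//     let mut mapping = NodeToWidgetsMapping::default();
//     mapping.insert_widget(node, expr_a);
//     mapping.insert_widget(node, expr_b);
//     let remaining = HashSet::from([expr_a]);
//     // Keep only `expr_a`; the callback receives every expression that is dropped.
//     mapping.retain_node_widgets(node, &remaining, |expr| manager.remove_visualization(expr));
//     assert_eq!(mapping.remove_node_widgets(node), vec![expr_a]);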
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on a new request. Returns true if the visualization query needs
/// to be updated.
fn | update | identifier_name |
|
argument_parser.py | (locustfile):
"""
Attempt to locate a locustfile, either explicitly or by searching parent dirs.
"""
# Obtain env value
names = [locustfile]
# Create .py version if necessary
if not names[0].endswith('.py'):
names.append(names[0] + '.py')
# Does the name contain path elements?
if os.path.dirname(names[0]):
# If so, expand home-directory markers and test for existence
for name in names:
expanded = os.path.expanduser(name)
if os.path.exists(expanded):
if name.endswith('.py') or _is_package(expanded):
return os.path.abspath(expanded)
else:
# Otherwise, start in cwd and work downwards towards filesystem root
path = os.path.abspath('.')
while True:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
if name.endswith('.py') or _is_package(joined):
return os.path.abspath(joined)
parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
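# Illustrative behaviour sketch (hypothetical paths, not taken from the sources):
#
#   find_locustfile('locustfile')     # -> '/home/user/proj/locustfile.py' if it exists in cwd
#                                     #    or any parent directory
#   find_locustfile('tests/load.py')  # -> absolute path of 'tests/load.py' if that file exists
#   find_locustfile('missing')        # -> None (nothing matching 'missing' or 'missing.py')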
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
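# Minimal usage sketch (hypothetical arguments): the bare parser only knows about
# -f/--locustfile, so unknown options are left over for the full parser built later.
#
#   parser = get_empty_argument_parser(add_help=False)
#   opts, unknown = parser.parse_known_args(['-f', 'my_tests.py', '--clients', '5'])
#   # opts.locustfile == 'my_tests.py'; ['--clients', '5'] remains in `unknown`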
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
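# Illustrative usage (hypothetical layout): resolve the locustfile before building the full
# option set, so the imported test script can register extra command line arguments first.
#
#   locustfile_path = parse_locustfile_option(['-f', 'locustfile'])
#   # -> an absolute path such as '/home/user/proj/locustfile.py'; on failure the process
#   #    exits after printing an error message.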
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
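# Illustrative distributed invocations using the options defined above (hypothetical hosts):
#
#   locust -f locustfile.py --master --master-bind-port 5557 --expect-workers 2
#   locust -f locustfile.py --worker --master-host 10.0.0.5 --master-port 5557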
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should | find_locustfile | identifier_name |
|
argument_parser.py | parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
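# Illustrative headless invocation (hypothetical values): note that -c and -t must be given
# together with --headless, as stated in the help text above.
#
#   locust -f locustfile.py --headless -c 100 -r 10 -t 10m --host http://10.21.32.33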
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified."
)
# Number of clients to incease by Step
step_load_group.add_argument(
'--step-clients',
type=int,
default=1,
help="Client count to increase by step in Step Load mode. Only used together with --step-load"
)
# Time limit of each step
step_load_group.add_argument(
'--step-time',
help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load"
)
other_group = parser.add_argument_group("Other options")
# Display ratio table of all tasks
other_group.add_argument(
'--show-task-ratio',
action='store_true',
help="Print table of the locust classes' task execution ratio"
)
# Display ratio table of all tasks in JSON format
other_group.add_argument(
'--show-task-ratio-json',
action='store_true',
help="Print json data of the locust classes' task execution ratio"
)
# Version number (optparse gives you --version but we have to do it
# ourselves to get -V too. sigh)
other_group.add_argument(
'--version', '-V',
action='version',
help="Show program's version number and exit",
version='%(prog)s {}'.format(version),
)
# set the exit code to post on errors
other_group.add_argument(
'--exit-code-on-error',
type=int,
default=1,
help="Sets the process exit code to use when a test result contain any failure or error"
)
other_group.add_argument( | '-s', '--stop-timeout',
action='store',
type=int, | random_line_split |
|
argument_parser.py | parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
|
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
"""
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate
parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
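# Illustrative stats invocation (hypothetical base name): --csv writes files derived from the
# given base name, and --csv-full-history additionally appends to the _stats_history.csv file
# mentioned above.
#
#   locust -f locustfile.py --headless -c 10 -r 1 -t 5m --csv my_test --csv-full-history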
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when | if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1) | conditional_block |
argument_parser.py | parent_path = os.path.dirname(path)
if parent_path == path:
# we've reached the root path which has been checked this iteration
break
path = parent_path
# Implicit 'return None' if nothing was found
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
parser = configargparse.ArgumentParser(
default_config_files=default_config_files,
auto_env_var_prefix="LOCUST_",
add_env_var_help=False,
add_config_file_help=False,
add_help=add_help,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=argparse.SUPPRESS,
description=textwrap.dedent("""
Usage: locust [OPTIONS] [LocustClass ...]
"""),
#epilog="",
)
parser.add_argument(
'-f', '--locustfile',
default='locustfile',
help="Python module file to import, e.g. '../other.py'. Default: locustfile"
)
return parser
def parse_locustfile_option(args=None):
"""
Construct a command line parser that is only used to parse the -f argument so that we can
import the test scripts in case any of them adds additional command line arguments to the
parser
"""
parser = get_empty_argument_parser(add_help=False)
parser.add_argument(
'-h', '--help',
action='store_true',
default=False,
)
parser.add_argument(
'--version', '-V',
action='store_true',
default=False,
)
options, _ = parser.parse_known_args(args=args)
locustfile = find_locustfile(options.locustfile)
if not locustfile:
if options.help or options.version:
# if --help or --version is specified we'll call parse_options which will print the help/version message
parse_options(args=args)
sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n")
sys.exit(1)
if locustfile == "locust.py":
sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
sys.exit(1)
return locustfile
def setup_parser_arguments(parser):
| parser.add_argument(
'-r', '--hatch-rate',
type=float,
default=1,
help="The rate per second in which clients are spawned. Only used together with --headless"
)
# Time limit of the test run
parser.add_argument(
'-t', '--run-time',
help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless"
)
# List locust commands found in loaded locust files/source files
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_commands',
help="Show list of possible locust classes and exit"
)
web_ui_group = parser.add_argument_group("Web UI options")
web_ui_group.add_argument(
'--web-host',
default="",
help="Host to bind the web interface to. Defaults to '*' (all interfaces)"
)
web_ui_group.add_argument(
'--web-port', '-P',
type=int,
default=8089,
help="Port on which to run web host"
)
# if we should print stats in the console
web_ui_group.add_argument(
'--headless',
action='store_true',
help="Disable the web interface, and instead start the load test immediately. Requires -c and -t to be specified."
)
web_ui_group.add_argument(
'--web-auth',
type=str,
dest='web_auth',
default=None,
help='Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password'
)
master_group = parser.add_argument_group(
"Master options",
"Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
)
# if locust should be run in distributed mode as master
master_group.add_argument(
'--master',
action='store_true',
help="Set locust to run in distributed mode with this process as master"
)
master_group.add_argument(
'--master-bind-host',
default="*",
help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
)
master_group.add_argument(
'--master-bind-port',
type=int,
default=5557,
help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557."
)
master_group.add_argument(
'--expect-workers',
type=int,
default=1,
help="How many workers master should expect to connect before starting the test (only when --headless used)."
)
master_group.add_argument(
'--expect-slaves',
action='store_true',
help=configargparse.SUPPRESS
)
worker_group = parser.add_argument_group(
"Worker options",
textwrap.dedent("""
Options for running a Locust Worker node when running Locust distributed.
Only the LOCUSTFILE (-f option) needs to be specified when starting a Worker, since other options such as -c, -r, -t are specified on the Master node.
"""),
)
# if locust should be run in distributed mode as worker
worker_group.add_argument(
'--worker',
action='store_true',
help="Set locust to run in distributed mode with this process as worker"
)
worker_group.add_argument(
'--slave',
action='store_true',
help=configargparse.SUPPRESS
)
# master host options
worker_group.add_argument(
'--master-host',
default="127.0.0.1",
help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1."
)
worker_group.add_argument(
'--master-port',
type=int,
default=5557,
help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557."
)
stats_group = parser.add_argument_group("Request statistics options")
# A file that contains the current request stats.
stats_group.add_argument(
'--csv', '--csv-base-name',
dest='csvfilebase',
help="Store current request stats to files in CSV format.",
)
# Adds each stats entry at every iteration to the _stats_history.csv file.
stats_group.add_argument(
'--csv-full-history',
action='store_true',
default=False,
dest='stats_history_enabled',
help="Store each stats entry in CSV format to _stats_history.csv file",
)
# if we should print stats in the console
stats_group.add_argument(
'--print-stats',
action='store_true',
help="Print stats in the console"
)
# only print summary stats
stats_group.add_argument(
'--only-summary',
action='store_true',
help='Only print the summary stats'
)
stats_group.add_argument(
'--reset-stats',
action='store_true',
help="Reset statistics once hatching has been completed. Should be set on both master and workers when running in distributed mode",
)
log_group = parser.add_argument_group("Logging options")
# skip logging setup
log_group.add_argument(
'--skip-log-setup',
action='store_true',
dest='skip_log_setup',
default=False,
help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults."
)
# log level
log_group.add_argument(
'--loglevel', '-L',
default='INFO',
help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
)
# log file
log_group.add_argument(
'--logfile',
help="Path to log file. If not set, log will go to stdout/stderr",
)
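# Illustrative logging configurations (hypothetical path):
#
#   locust -f locustfile.py -L DEBUG --logfile /tmp/locust.log   # verbose logging to a file
#   locust -f locustfile.py --skip-log-setup                     # let the test configure logging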
step_load_group = parser.add_argument_group("Step load options")
# Enable Step Load mode
step_load_group.add_argument(
'--step-load',
action='store_true',
help="Enable Step Load mode to monitor how performance metrics varies when user | """
Setup command-line options
Takes a configargparse.ArgumentParser as argument and calls its add_argument
for each of the supported arguments
"""
parser._optionals.title = "Common options"
parser.add_argument(
'-H', '--host',
help="Host to load test in the following format: http://10.21.32.33"
)
# Number of Locust users
parser.add_argument(
'-c', '--clients',
type=int,
dest='num_clients',
default=1,
help="Number of concurrent Locust users. Only used together with --headless"
)
# User hatch rate | identifier_body |
client.rs | client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else |
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
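// Minimal usage sketch (illustrative only; `transport`, `executor` and `path` are assumed to
// exist and are not taken from the sources):
//
//     let client = Client::new(transport);
//     executor.spawn_local(client.runner());        // drive the connection
//     client.init(Uuid::new_v4()).await?;           // must be called exactly once, first
//     let bytes = client.read_file(&path).await?;   // afterwards, requests can be issued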
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
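// Sketch of consuming visualization updates (assuming the shared `Event` wrapper exposes a
// `Notification` variant, which is an assumption of this illustration):
//
//     let mut events = client.event_stream();
//     while let Some(event) = events.next().await {
//         if let Event::Notification(Notification::VisualizationUpdate { context, data }) = event {
//             handle_update(context, data);   // hypothetical handler
//         }
//     }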
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer:: | {
Err(RpcError::MismatchedResponseType.into())
} | conditional_block |
client.rs | client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn | (
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new | processor | identifier_name |
client.rs | client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// New data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notifications and errors.
///
/// On a repeated call, the previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> |
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer:: | {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
} | identifier_body |
client.rs | client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// A new data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notification and errors.
///
/// On a repeated call, previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> + 'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport + 'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R: 'static,
F: 'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
| // ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new( |
// ===============
// === Fixture === | random_line_split |
anlyzPRR.py | 2d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('huh')
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
dkey = 1000 + openYears
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[0].keys()))
allNDay = sorted(list(allNDaySet))
for nday in allNDay:
if nday > 365:
lbl = '> %d year' % (nday-1000)
else:
lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
def getWebPages(prrTbl,outf):
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
if webContent.find('pdf') != -1:
print('here')
npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv' | random_line_split |
||
anlyzPRR.py | rrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return hist[0]
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub('\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('huh')
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
| outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close() | identifier_body |
|
anlyzPRR.py | (a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %H:%M:%S %p'
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return hist[0]
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub('\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('huh')
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if | cmpd1 | identifier_name |
|
anlyzPRR.py | hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
# only singletons thwart the search for half-way point
if len(hist) == 1:
return hist[0]
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
return re.sub('\W','_',dept.upper())
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
try:
recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
except Exception as e:
print('huh')
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
daysOpen = defaultdict(lambda: defaultdict(list)) # ndays -> OPD/non -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
| dkey = 1000 + openYears | conditional_block |
|
control.rs | This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() |
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res | {
// Failure to send the command means either the binding task have dropped the
// command channel, or the binding task doesn't exist anymore. Both possibilities
// shouldn't happen (see doc for `Control.bindings`); but in the opposite case
// let's remove the binding from the array that keeps track of them, and spawn the
// new one on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
} | conditional_block |
control.rs | . This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() {
// Failure to send the command means either the binding task have dropped the
// command channel, or the binding task doesn't exist anymore. Both possibilities
// shouldn't happen (see doc for `Control.bindings`); but in the opposite case
// let's remove the binding from the array that keeps track of them, and spawn the
// new one on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn | (
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res | reload_config | identifier_name |
control.rs | . This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime, | //!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers != 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address != new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() {
// Failure to send the command means either the binding task have dropped the
// command channel, or the binding task doesn't exist anymore. Both possibilities
// shouldn't happen (see doc for `Control.bindings`); but in the opposite case
// let's remove the binding from the array that keeps track of them, and spawn the
// new one on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key, .. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push | //! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that). | random_line_split |
config.rs | env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
#[clap(flatten)]
network: NetworkCli,
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
Auto,
Never,
Always,
}
impl Color {
/// Returns true if color should be enabled, either because the setting is [Color::Always],
    /// or because it is [Color::Auto] and stdout is targeting a terminal.
pub fn is_color_enabled(&self) -> bool {
use std::io::IsTerminal;
match self {
Color::Auto => std::io::stdout().is_terminal(),
Color::Never => false,
Color::Always => true,
}
}
}
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
fn from(value: Network) -> Self {
match value {
Network::Mainnet => "mainnet",
Network::Testnet => "testnet",
Network::Testnet2 => "testnet2",
Network::Integration => "integration",
Network::Custom => "custom",
}
.into()
}
}
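/// Parses the `--rpc.cors-domains` values into an `AllowedOrigins` setting.
///
/// Illustrative input/output mapping (derived from the checks below):
/// - `[]` -> `Ok(None)` (CORS disabled)
/// - `["*"]` -> `Ok(Some(AllowedOrigins::Any))`
/// - `["http://one.io"]` -> `Ok(Some(AllowedOrigins::List(..)))`
/// - `["*", "http://one.io"]` -> `Err(WildcardAmongOtherValues)`
/// - `["http://one.io/path"]` -> `Err(InvalidDomain(..))` because it is not in origin form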
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
if inputs.is_empty() {
return Ok(None);
}
if inputs.len() == 1 && inputs[0] == "*" {
return Ok(Some(AllowedOrigins::Any));
}
if inputs.iter().any(|s| s == "*") {
return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
}
let valid_origins = inputs
.into_iter()
.map(|input| match url::Url::parse(&input) {
            // A valid URL, but it must be in origin form only: no path, no query, and no trailing slash from the default path.
Ok(url) => {
let origin = url.origin();
if !origin.is_tuple() {
return Err(RpcCorsDomainsParseError::InvalidDomain(input));
}
if origin.ascii_serialization() == input {
Ok(input)
} else {
// Valid URL but not a valid origin
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
}
            // Not a URL, hence not a valid origin
Err(_e) => {
eprintln!("Url_parse_error: {_e}");
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
})
.collect::<Result<HashSet<_>, RpcCorsDomainsParseError>>()?;
Ok(Some(AllowedOrigins::List(
valid_origins.into_iter().collect(),
)))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
#[derive(Debug, thiserror::Error, PartialEq)]
enum RpcCorsDomainsParseError {
#[error("Invalid allowed domain for CORS: {0}.")]
InvalidDomain(String),
#[error(
"Specify either wildcard '*' or a comma separated list of allowed domains for CORS, not both."
)]
WildcardAmongOtherValues,
}
pub struct Config {
pub data_directory: PathBuf,
pub ethereum: |
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None, | random_line_split |
|
config.rs | {
#[arg(
long,
value_name = "DIR",
value_hint = clap::ValueHint::DirPath,
long_help = "Directory where the node should store its data",
env = "PATHFINDER_DATA_DIRECTORY",
default_value_os_t = (&std::path::Component::CurDir).into()
)]
data_directory: PathBuf,
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None,
env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
#[clap(flatten)]
network: NetworkCli,
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
Auto,
Never,
Always,
}
impl Color {
/// Returns true if color should be enabled, either because the setting is [Color::Always],
    /// or because it is [Color::Auto] and stdout is targeting a terminal.
pub fn is_color_enabled(&self) -> bool {
use std::io::IsTerminal;
match self {
Color::Auto => std::io::stdout().is_terminal(),
Color::Never => false,
Color::Always => true,
}
}
}
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
fn from(value: Network) -> Self {
match value {
Network::Mainnet => "mainnet",
Network::Testnet => "testnet",
Network::Testnet2 => "testnet2",
Network::Integration => "integration",
Network::Custom => "custom",
}
.into()
}
}
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
if inputs.is_empty() {
return Ok(None);
}
if inputs.len() == 1 && inputs[0] == "*" {
return Ok(Some(AllowedOrigins::Any));
}
if inputs.iter().any(|s| s == "*") {
return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
}
let valid_origins = inputs
.into_iter()
.map(|input| match url::Url::parse(&input) {
            // A valid URL, but it must be in origin form only: no path, no query, and no trailing slash from the default path.
Ok(url) => {
let origin = url.origin();
if !origin.is_tuple() {
return Err(RpcCorsDomainsParseError::InvalidDomain(input));
}
if origin.ascii_serialization() == input {
Ok(input)
} else {
// Valid URL but not a valid origin
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
}
            // Not a URL, hence not a valid origin
Err(_e) => {
eprintln!("Url_parse_error: {_e}");
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
})
.collect::<Result<HashSet<_>, RpcCorsDomainsParseError>>()?;
Ok(Some(AllowedOrigins::List(
valid_origins.into_iter().collect(),
)))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
#[derive(Debug, thiserror:: | Cli | identifier_name |
|
vacuum.py | .clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
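# Illustrative refresh call (entity id is a placeholder; "part" must be one of
# REFRESH_STR_TO_EVENT_DTO's keys or REFRESH_MAP):
#   service: <DOMAIN>.refresh
#   target: {"entity_id": "vacuum.deebot_living_room"}
#   data: {"part": "<part>"}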
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
|
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get | self._rooms = event.rooms
self.async_write_ha_state() | identifier_body |
vacuum.py | .clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
| | SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("clean | SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP | random_line_split |
vacuum.py | .clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
|
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get | async_add_entities(new_devices) | conditional_block |
vacuum.py | .clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def | (event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get | on_fan_speed | identifier_name |
lib.rs | (error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
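    ///
    /// Rough usage sketch (URLs and credentials are placeholders; error handling elided):
    ///
    /// ```ignore
    /// let client = Client::discover(
    ///     "client-id".to_string(),
    ///     "client-secret".to_string(),
    ///     Url::parse("https://example.com/callback")?,
    ///     Url::parse("https://accounts.example.com")?,
    /// )?;
    /// ```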
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
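    ///
    /// Rough sketch, assuming `Options` implements `Default` (values are placeholders):
    ///
    /// ```ignore
    /// let options = Options {
    ///     scope: Some("openid email profile".to_string()),
    ///     nonce: Some("random-nonce".to_string()),
    ///     ..Default::default()
    /// };
    /// let auth_url = client.auth_url(&options);
    /// // Redirect the user agent to `auth_url`; keep `options` around for `authenticate`.
    /// ```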
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
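    ///
    /// Rough callback-handler sketch (`code` comes from the provider's redirect; the nonce must
    /// match the one passed to `auth_url`):
    ///
    /// ```ignore
    /// let token = client.authenticate(&code, Some("random-nonce"), None)?;
    /// let claims = token.id_token.payload()?;
    /// ```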
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
            // TODO We would want to verify that the keyset is non-empty in the constructor
            // rather than on every decode call, but we can't return an error from new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
    /// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
    /// - Validation::Expired::MaxAge if the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => { | if expected != actual { | random_line_split |
|
lib.rs | wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn | (&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
            // TODO We would want to verify the keyset is non-empty in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
    /// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
    /// - Validation::Expired::MaxAge if the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match | decode_token | identifier_name |
lib.rs | _token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if !scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded { .. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
            // TODO We would want to verify the keyset is non-empty in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm != sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value, .. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
    /// Validate a decoded token. If you don't get an error, it's valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
    /// - Validation::Expired::MaxAge if the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss != self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => {
if expected != actual {
let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if !claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
if let None = claims.azp {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual != &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() | {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
} | conditional_block |
|
machine.rs | {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
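    ///
    /// A minimal sketch of how this is typically driven (not compiled as a doctest;
    /// `machine` is assumed to be an already-built `Machine`):
    ///
    /// ```ignore
    /// // Treat a runtime error as "not successful"; the cause stays queryable afterwards.
    /// let success = machine.execute_all().unwrap_or(false);
    /// let failure = machine.failure_reason();
    /// ```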
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs()
.into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
        // Check for normal completion
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if !self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if !self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output != self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(®s_by_name).unwrap().unchecked_into()
}
/// A wrapper for [Self::stacks], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping stacks names (strings) to their values (`Vec<LangValue>`).
#[wasm_bindgen(getter, js_name = "stacks")]
pub fn wasm_stacks(&self) -> LangValueArrayMap {
// Convert the keys of the stacks map to strings
let stacks_by_name: HashMap<String, &[LangValue]> = self
.stacks()
.into_iter()
.map(|(stack_ref, stack_value)| {
(stack_ref.to_string(), stack_value)
})
.collect();
// Convert the hashmap to a js object. Be careful here!
JsValue::from_serde(&stacks_by_name)
.unwrap()
.unchecked_into()
}
/// A wrapper for [Self::error], to be called from wasm. We can't send
/// maps through wasm, so this returns a simplified error as a
/// [SourceElement].
#[wasm_bindgen(getter, js_name = "error")]
pub fn wasm_error(&self) -> Option<SourceElement> {
self.error.as_ref().map(|wrapped_error| {
// If an error is present, there should always be exactly one
match wrapped_error.errors() {
[error] => error.into(),
errors => panic!(
"Expected exactly 1 runtime error, but got {:?}",
errors
),
}
})
}
/// A wrapper for [Self::execute_next], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeNext")]
pub fn wasm_execute_next(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_next().unwrap_or(true)
}
/// A wrapper for [Self::execute_all], to be called from wasm. We throw
/// away the error because it simplifies the logic on the TS side. That
/// error is accessible via [Self::wasm_error] anyway.
#[wasm_bindgen(js_name = "executeAll")]
pub fn wasm_execute_all(&mut self) -> bool {
// If an error occurred, that means something executed, so return true
self.execute_all().unwrap_or(true)
}
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum | FailureReason | identifier_name |
|
machine.rs | &mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
        // If the stack is at capacity, reject the push
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
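            // The arithmetic instructions below use `std::num::Wrapping`, so additions,
            // subtractions, and multiplications wrap on overflow instead of panicking.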
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while !self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values. | pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs() | random_line_split |
|
machine.rs | is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> |
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
| {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
        // If the stack is at capacity, reject the push
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
} | identifier_body |
machine.rs | is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
        // If the stack is at capacity, reject the push
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else |
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor != 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src) != 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
| {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
} | conditional_block |
rgou.py | (event):
print("clicked at", event.x, event.y)
coords(event.x,event.y)
# coordss(event.x,event.y)
#frame = Frame(game, width=100, height=100)
#game.mainloop()
game = tk.Tk()
game.title("Royal Game of Ur")
## BG image
#fname = "RGOU.gif"
#fname = "RGOU2.gif"
fname = "RGOU4.gif"
bg_image = tk.PhotoImage(file=fname)
bg_image = bg_image.subsample(2,2)
w = bg_image.width()
h = bg_image.height()
strs = "%dx%d+50+30" % (w,h)
print(strs)
game.geometry(strs)
cv = tk.Canvas(width=w,height=h)
cv.pack(side='top',fill='both',expand='yes')
cv.create_image(0,0,image=bg_image,anchor='nw')
cv.bind("<Button-1>", callback)
cv.pack()
print(dir(cv))
board_x_y = [ # pixel hit-box x1, y1, x2, y2, then the [row, lane] board coordinate (lane 0 = white side, 1 = middle, 2 = black side)
[100,80,180,152,[0,0]],
[100,170,180,231,[1,0]],
[100,245,180,315,[2,0]],
[100,325,180,394,[3,0]],
[20,332,69,386,[4,0]], # white start
[60,443,142,517,[5,0]], # roll white
[100,578,180,635,[6,0]],
[100,650,180,719,[7,0]],
# w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
# b = cv.create_image(330,480,image=blackrollicon)
[189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
white_pieces = [[4,0] for i in range(7)] # score white
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons
global w ,b
global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x | callback | identifier_name |
|
rgou.py | 89,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
white_pieces = [[4,0] for i in range(7)] # score white
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons
global w ,b
global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check whether the game has already ended (one side has no pieces left)
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
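    # The Royal Game of Ur is played with four binary dice (tetrahedral dice with two
    # marked corners each), so a roll is the sum of four independent 0/1 outcomes:
    # a value from 0 to 4, binomially distributed, with 2 the most common result.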
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
|
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
        roll()
        if rolled_num == 0:  # a bonus roll of 0 ends the extra turn immediately
endmove()
checkroll()
def coords(x,y):
if 16 < x < | a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup() | identifier_body |
rgou.py | 189,80,257,152,[0,1]],
[189,170,257,231,[1,1]],
[189,239,257,315,[2,1]],
[189,325,257,394,[3,1]],
[189,403,257,478,[4,1]],
[189,489,257,560,[5,1]],
[189,578,257,635,[6,1]],
[189,650,257,719,[7,1]],
[270,80,338,152,[0,2]],
[270,170,338,231,[1,2]],
[270,245,338,315,[2,2]],
[270,325,338,394,[3,2]],
[365,319,445,396,[4,2]], # black start
[293,446,368,517,[5,2]], # roll black
[270,578,338,635,[6,2]],
[270,650,338,719,[7,2]]
]
def setup():
global white_pieces, black_pieces
global pieces
global white_track, black_track
global tracks
global turn , rolled_num, moved , rolled
rolled = False
moved = True # did we move after roll?
turn = 0 # 0 = white , 2 = black
rolled_num = 0 # number rolled
white_pieces = [[4,0] for i in range(7)] # score white
black_pieces = [[4,2] for i in range(7)]
pieces = [white_pieces,None,black_pieces]
white_track = [[4,0],[3,0],[2,0],[1,0],[0,0],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,0],[6,0]]
black_track = [[4,2],[3,2],[2,2],[1,2],[0,2],
[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],
[7,2],[6,2]]
# common_track = [[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7]]
tracks = [white_track,None,black_track]
def_cv_pieces()
# roll icons
checkroll()
score()
# forfeit "button"
t = cv.create_text(90,770,text="forfeit move",font="Times 20 bold")
r = cv.create_text(350,770,text="reset",font="Times 20 bold")
rollicons = []
def rollicon(y): # 0 white , 2 black
s = ""
if turn == 2:
dd = "-black"
else:
dd = "-white"
if turn == y:
s+=dd
if not rolled:
s+="roll"
else:
s+= str(rolled_num)
# if not moved:
# s+="-active"
else:
if rolled_num == 0:
s = "0"
else:
s="wait"
s+=".gif"
pc = tk.PhotoImage(file=s)
pc = pc.subsample(2,2)
return pc
def checkroll():
# 5,0 and 5,2 coords
global rollicons | global cv
global whiterollicon,blackrollicon
whiterollicon = rollicon(0)
blackrollicon = rollicon(2)
if len(rollicons) == 3:
cv.delete(rollicons[0])
cv.delete(rollicons[2])
# w = rollicons[0]
# b = rollicons[2]
# cv[w]["image"] = whiterollicon
# cv[b]["image"] = blackrollicon
print(f"rollicons = {rollicons}")
# cv.delete(w)
# cv.delete(b)
# tk.Canvas.itemconfig(w,100,493,image=whiterollicon)
# tk.Canvas.itemconfig(b,270,489,image=blackrollicon)
# cv.itemcomfigure(w,image = whiterollicon)
# cv.itemconfigure(b,image = blackrollicon)
# if len(rollicons) == 0:
# white
# [100,493,152,526,[5,0]], # roll white
# [73,433,152,526,[5,0]], # roll white
w = cv.create_image(100,480,image=whiterollicon)
# [270,489,338,560,[5,2]], # roll black
# [287,428,338,560,[5,2]], # roll black
b = cv.create_image(330,480,image=blackrollicon)
# print(cv.itemconfig(b))
rollicons = [w,None,b]
def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check whether the game has already ended (one side has no pieces left)
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup()
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
        roll()
        if rolled_num == 0:  # a bonus roll of 0 ends the extra turn immediately
endmove()
checkroll()
def coords(x,y):
if 16 < x < 16 | global w ,b | random_line_split |
rgou.py | def def_cv_pieces(delete=False):
global whitepic , blackpic
global cv
global white_cv
global black_cv
global pieces_cv
if delete:
for i in white_cv:
cv.delete(i)
#
for i in black_cv:
cv.delete(i)
return
white_cv= []
black_cv = []
pieces_cv = []
whitepic = tk.PhotoImage(file="-white.gif")
whitepic = whitepic.subsample(2,2)
blackpic = tk.PhotoImage(file="-black.gif")
blackpic = blackpic.subsample(2,2)
## check if there are no more cv objects
t = cv.create_image(-100,-100,image=whitepic)
# for i in range(2,t+1):
# cv.delete(i)
for i in white_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[4] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=whitepic)
white_cv.append(s)
print("white")
for i in black_pieces:
x,y = i[0],i[1]
for c in board_x_y:
if c[-1] == [x,y]:
xx = int((c[2] + c[0]) /2)
yy = int((c[3] + c[1]) / 2)
s = cv.create_image(xx, yy, image=blackpic)
black_cv.append(s)
print("black")
pieces_cv = [white_cv,None,black_cv]
print(pieces_cv)
def roll():
score()
global rolled_num
global moved,rolled
    # check whether the game has already ended (one side has no pieces left)
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if moved == False or rolled == True:
return
i = 0
for a in range(4):
i+= random.randint(0,1)
rolled_num = i
moved = False
rolled = True
checkroll()
def game_ended(turn):
if turn == 0:
s = "white"
opp = 2
else:
s = "black"
opp = 0
t = f"{s} won 7 : {7 - len(pieces[opp])}"
showinfo("Window",t)
def reset():
a = tk.messagebox.askokcancel("popup","reset?")
if a:
def_cv_pieces(True)
setup()
# score()
def endmove(playagain = False): # True == one more move
global turn,rolled,moved
if turn == 0:
opponent = 2
else:
opponent = 0
if not playagain:
turn = opponent
rolled = False
moved = True
if playagain:
        roll()
        if rolled_num == 0:  # a bonus roll of 0 ends the extra turn immediately
endmove()
checkroll()
def coords(x,y):
if 16 < x < 164:
if 753 < y < 776:
forfeit()
return
if 315 < x < 390:
if 757 < y < 779:
reset()
return
for item in board_x_y:
if item[0] <= x <= item[2]:
if item[1] <= y <= item[3]:
print(item[4])
play(item[4])
# movec(item[4])
return
def getpossition(x,y):
for i in board_x_y:
if i[4] == [x,y]:
return i[0],i[1]
def play(coords):
# global white_pieces
# global black_pieces
# global pieces, board_butts
global rolled_num , turn, moved, rolled
global tracks
global pieces_cv
global pieces
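    # play() handles every board click once it has been mapped to a square: clicking the
    # roll square at [5, 0] / [5, 2] rolls for the side to move, while clicking one of the
    # mover's pieces tries to advance it rolled_num squares along its track (bearing off on
    # an exact roll past the end, capturing opponents, and shifting to [4, 1] when the
    # protected square [3, 1] is occupied by the opponent).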
print(f"rolled_num = {rolled_num}")
print(f"turn = {turn}")
print(f"moved = {moved}")
print(f"rolled = {rolled}")
print(pieces)
checkroll()
x = coords[0]
y = coords[1]
    # if a roll button square was clicked, handle the roll
if x == 5 and y == turn:
if moved:
roll()
if rolled_num ==0:
if turn == 0:
turn = 2
else:
turn = 0
moved = True
rolled = False
checkroll()
return
if coords in pieces[turn] and not moved:
if turn == 0:
opponent = 2
else:
opponent = 0
trackindex = tracks[turn].index(coords) # position on board
print(f"trackindex = {trackindex}")
indpiece = pieces[turn].index(coords) # identify piece
print(f"indpiece = {indpiece}")
t = pieces_cv[turn][indpiece] # identify canvas of piece
print(f"t = {t}")
result = trackindex + rolled_num
print(result)
if len(tracks[turn]) < result:
return
if len(tracks[turn]) == result:
pieces[turn].pop(indpiece)
pieces_cv[turn].pop(indpiece)
cv.delete(t)
score()
if len(pieces[turn]) == 0:
game_ended(turn)
endmove()
# next turn
return
coords_new = tracks[turn][trackindex+rolled_num]
newx = coords_new[0]
newy = coords_new[1]
print(f"coords_new = {coords_new}")
# special case
if [newx,newy] == [3,1] : # can't take piece there
if [newx,newy] in pieces[opponent]:
newx+=1
if [newx,newy] in pieces[turn]: # can't take own piece
return
newcoordx,newcoordy = getpossition(newx,newy)
if [newx,newy] in pieces[opponent]: # take
oppindex = pieces[opponent].index([newx,newy])
oppx,oppy = getpossition(4,opponent)
difopx = oppx - newcoordx
difopy = oppy - newcoordy
taken = pieces_cv[opponent][oppindex]
cv.move(taken,difopx,difopy) # move to start
pieces[opponent][oppindex] = [4,opponent] # set coords
print(f"{newcoordx},{newcoordy}")
oldx,oldy = getpossition(x,y)
difx = newcoordx - oldx
dify = newcoordy - oldy
cv.move(t,difx,dify)
pieces[turn][indpiece] = [newx,newy]
print("move!!")
print(f"{t},{difx},{dify}")
print(f"{pieces[turn][indpiece]}")
print(f"{pieces[turn]}")
# play again squares
playagain = [ [0,0] , [0,2] , [3,1], [6,0] ,[6,2]]
play =( [newx,newy] in playagain )
endmove(play)
return
def is_move_possible():
a = pieces[turn] # all pieces of player on move
road = tracks[turn]
if turn == 0:
opponent = 2
else:
opponent = 0
alreadychecked = []
for piece in a:
if piece in alreadychecked:
continue
piece_position = road.index(piece)
        if rolled_num + piece_position == len(road):
            # an exact roll bears the piece off the board, which is always legal
            return True
        if rolled_num + piece_position < len(road):
            newcoords = road[piece_position + rolled_num]
            if newcoords == [3, 1]:  # special square: shift to [4, 1] if the opponent sits on it
                if newcoords in pieces[opponent]:
                    newcoords = [4, 1]
            if newcoords not in a:
                return True
alreadychecked.append(piece)
return False
def forfeit():
global moved,rolled,turn
# check if game did not ended already
for i in range(0,3,2):
if not pieces[i]:
game_ended(i)
return
if not rolled:
tk.messagebox.askokcancel("popup","ROLL!")
return
if rolled and is_move_possible():
tk.messagebox.askokcancel("popup","you can move!")
return
endmove()
scoretext = []
def score():
global scoretext
w = str(7 - len(pieces[0]))
b = str(7 - len(pieces[2]))
t = f"{w} : {b}"
if len(scoretext) == 0:
score = cv.create_text(220,780,font="Times 30 italic bold",text=t)
scoretext.append(score)
else:
| cv.itemconfig(scoretext[0],font="Times 30 italic bold",text=t) | conditional_block |
|
lib.rs | typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! /// ![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! /// ![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //! ![Alt text goes here][myimagelabel] ![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//! ![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
ItemUse,
};
#[derive(Debug)]
struct | {
label: String,
path: PathBuf,
}
impl Parse for ImageDescription {
fn parse(input: ParseStream) -> parse::Result<Self> {
let label = input.parse::<syn::LitStr>()?;
input.parse::<syn::Token![,]>()?;
let path = input.parse::<syn::LitStr>()?;
Ok(ImageDescription {
label: label.value(),
path: PathBuf::from(path.value()),
})
}
}
fn encode_base64_image_from_path(path: &Path) -> String {
let bytes = read(path).unwrap_or_else(|_| panic!("Failed to load image at {}", path.display()));
base64::encode(bytes)
}
fn determine_mime_type(extension: &str) -> String {
let extension = extension.to_ascii_lowercase();
// TODO: Consider | ImageDescription | identifier_name |
lib.rs | //! around the current limitations of `rustdoc` and enables a practically workable approach to
//! embedding images in a portable manner.
//!
//! # How to embed images in documentation
//!
//! First, you'll need to depend on this crate. In `cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! // Replace x.x with the latest version
//! embed-doc-image = "x.x"
//! ```
//!
//! What the next step is depends on whether you want to embed images into *inner attribute
//! documentation* or *outer attribute documentation*. Inner attribute documentation is usually
//! used to document crate-level or module-level documentation, and typically starts each line with
//! `//!`. Outer attribute docs are used for most other forms of documentation, such as function
//! and struct documentation. Outer attribute documentation typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! /// ![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! /// ![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //! ![Alt text goes here][myimagelabel] ![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//! ![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
| //!
//! This crate represents a carefully crafted solution based on procedural macros that works | random_line_split |
|
orders-alter.js | _id":parseSegment[1]};
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}else if(obj.hasClass("alRefundDel") && confirm("确认删除订单吗?")){ // 实际上是隐藏订单组
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}else if(obj.hasClass("delBtn") && confirm("确认删除吗?")){
data = {"chargeback_id":parseSegment[1]};
order.doPost(parseSegment[0],data,function(result){
order.reloadCallBack(result,obj);
});
}
}
order.doPost = function(url,data,func){
$.ajax({
url:url,
data:data,
type:'post',
dataType:'json',
success:func
});
}
order.showOrderDetail = function(obj){
var groupId = obj.closest('.orderList').data('id');
var href = "order.php?act=groupDetail&group_id="+groupId+"&r="+Math.random();
$(".dialog-order").show().html("").load(href,function(){
$(".dialog-order").removeClass("fadeHide").addClass("play").addClass("fadeRight");
$(".orderDetail-wrap .orderList").css({
"height":($(window).height()-$(".orderDetai-BtnWrap").height()-30)+"px",
"overflow":"auto"
})
$(".dialog-mask").show();
});
}
order.commonCallBack = function(result,obj){
var self=this;
if(!result.status){
alert(result.message);
return false;
}else{
// 刷新页面
obj.closest('.orderList').slideUp(100,function(){
obj.closest('.orderList').remove();
self.myScroll.refresh();
});
return false;
}
}
order.reloadCallBack = function(result,obj){
var self=this;
if(!result.status){
alert(result.message);
return false;
}else{
// 刷新页面
window.location.reload();
return false;
}
}
order.active = function(){
$(".order-head .tabs-line").width($(".tabs .swiper-slide").eq(0).width());
$('.tabBox').find('.swiper-slide').removeClass('active');
$('.tabBox').find('.'+type).addClass('active');
$(".tabBox .tabs-line").css({
"left":($(".tabs .swiper-slide").eq(0).width())*$('.tabBox').find('.'+type).index()+"px"
});
}
order.loadorderDetail = function (){
var self=this;
$(document).on("click",'.j-loadetail',function(){
var href=$(this).data("href")+"?r="+Math.random();
$(".dialog-order").show().html("").load(href,function(){
$(".dialog-order").removeClass("fadeHide").addClass("play").addClass("fadeRight");
$(".orderDetail-wrap .orderList").css({
"height":($(window).height()-$(".orderDetai-BtnWrap").height()-30)+"px",
"overflow":"auto"
})
$(".dialog-mask").show();
});
})
//去评价
$('body').on("click",".btn-evalute",function(e){
var oparent=$(e.currentTarget).closest(".orderList").find(".j-loadetail");
oparent.trigger("click");
})
//收起操作
$(document).on("tap",'.dialog-mask',function(){
$(".dialog-order").removeClass("fadeRight").addClass("fadeHide");
self.myScroll.refresh();
setTimeout(function(){
$('.dialog-mask').hide();
},500)
})
$(document).on("tap",".j-maskhide",function(e){
$(".dialog-order").removeClass("fadeRight").addClass("fadeHide");
self.myScroll.refresh();
setTimeout(function(){
$('.dialog-mask').hide();
},500)
//$('.dialog-mask').trigger("click");
})
}
order.imgBox= function (){
var oelis='<span class="order-elips"><img src="'+staticUrl+'/img/order/elips.png"><em>更多</em></span>',ohas=true;
$(".orderList-pdmore").each(function(index,item){
var olinum=$(item).find(".order-imgBox").length,owidth=$(item).find(".order-imgBox").eq(0).width(),oheight=$(item).find(".order-imgBox").eq(0).height();
var obili=65/58,oelipswid=$(item).find(".order-imgBox").eq(0).width(),oelipsheight= oelipswid/obili,oelipslen=$(item).find(".order-elips").length;
if(oelipslen<=0){
$(item).find(".order-imgBox").parent().append(oelis);
$(".order-elips").width(oelipswid);
$(".order-elips").height(oelipsheight);
$(".order-elips").css({
"width": owidth+"px",
"height":oheight+"px"
})
if(olinum>=3){
$(item).find(".order-imgBox").hide();
for(var i=0;i<3;i++){
$(item).find(".order-imgBox").eq(i).show();
}
}
}
})
}
order.hideOrder = function(orderId,obj){
var self=this;
$.confirm("确认删除吗?",function(flag){
if(flag){
$.ajax({
url:hideOrderUrl,
type:'post',
data:{'order_id':orderId},
dataType:'json',
success:function(result){
if(result.status == 1){
obj.closest('.orderList').slideUp(100);
obj.closest('.orderList').remove();
self.myScroll.refresh();
}else{
alert('删除失败');
}
}
});
}
});
}
order.getMoreOrder = function(self,func){
$.ajax({
'url':getMoreOrderUrl,
'data':{'page':order.page,'type':order.orderType}, | }
order.createGroupLiHtml = function(nowGroup,buttonHtml){
var orderHtmls = "";
var imagesHtmls = "";
var disableHtmls = "";
for(q in nowGroup.orderGoodsImage.pics){
disableHtmls = "";
if(nowGroup.orderGoodsImage.pics[q].disable){
disableHtmls = '<i class="icon-failstate"></i>';
}
imagesHtmls += '<span class="order-imgBox"><img src="'+order.picUrl + nowGroup.orderGoodsImage.pics[q].img+'" alt=""/>'+disableHtmls+'</span>';
}
var html = '<div class="orderList" data-type="'+nowGroup.priority.type+'" data-id="'+nowGroup.group_id+'">\
<div class="orderListBox clearfix">\
<p class="orderList-status '+nowGroup.translate.pClass+' fl">\
<i class="ordericon '+nowGroup.translate.iClass+'"></i>'+nowGroup.translate.title+'\
</p>\
<p class="orderList-ordernum fr">\
'+nowGroup.createtime+'\
</p>\
</div>\
<ul class="orderList-pdmore orderList-product">\
<li>\
<a class="j-loadetail" href="javascript:void(0);" data-href="order.php?act=groupDetail&group_id='+nowGroup.group_id+'&tag=1">\
<!--多个产品-->\
'+imagesHtmls+'\
<!--多个产品end-->\
</a>\
</li>\
</ul>\
<div class="orderList-btnBox btborder clearfix">\
<span class="totalPrice fl">\
总价:<em class="orange-icon">¥'+nowGroup.group_amount+'</em>\
</span>\
'+buttonHtml+'\
</div>\
</div>';
return html;
}
order.createNomalLiHtml = function(nowOrder,buttonHtml){
var disableHtml = "";
if(nowOrder.disable != undefined){
disableHtml = '<span class="failstate"></span>';
}
var html = '<div class="orderList" data-type="orderRefund" data-id="'+nowOrder.orders[0].order_id+'">\
<div class="orderListBox clearfix">\
<p class="orderList-status '+nowOrder.translate.pClass+' fl">\
<i class="ordericon '+nowOrder.translate.iClass+'"></i>'+nowOrder.translate.title+'\
</p>\
<p class="orderList-ordernum fr">\
'+nowOrder.orders[0].createtime+'\
</p>\
</div>\
<ul class="orderList-pdmore orderList-product">\
<li>\
<a class="j-loadetail" href="javascript:void(0);" data-href="order.php?act=orderDetail&order_id='+nowOrder.orders[0].order_id+'&tag=1">\
<!--多个产品-->\
<span class="order-imgBox"><img src="'+order.picUrl + nowOrder.orders[0].goods_image+'" alt=""/></span>\
<!--多个产品end-->\
</a>\
</li>\
</ul>\
<div class="orderList-btnBox btborder clearfix">\
<span class="totalPrice fl">\
总价:<em class="orange-icon">¥'+nowOrder | 'type':'post',
'dataType':'json',
success:func
}); | random_line_split |
rs_handler_user.go | .Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.add | OG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.Get | Currency(0, -1*amount, 0, 0, int32(pb.L | conditional_block |
rs_handler_user.go | .Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if | User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else {
rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.Get | rs. | identifier_name |
rs_handler_user.go | rsp := handler.GetCurrency(arg, rs.User)
rs.Send(rsp)
case *pb.CBuy:
arg := msg.(*pb.CBuy)
glog.Debugf("CBuy %#v", arg)
//优化
rsp, diamond, coin := handler.Buy(arg, rs.User)
//同步兑换
rs.addCurrency(diamond, coin, 0, 0, int32(pb.LOG_TYPE18))
//响应
rs.Send(rsp)
record, msg2 := handler.BuyNotice(coin, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
case *pb.CShop:
arg := msg.(*pb.CShop)
glog.Debugf("CShop %#v", arg)
//响应
rsp := handler.Shop(arg, rs.User)
rs.Send(rsp)
case *pb.BankGive:
arg := msg.(*pb.BankGive)
glog.Debugf("BankGive %#v", arg)
//rs.addBank(arg.Coin, arg.Type, arg.From)
rs.addCurrency(0, arg.GetCoin(), 0, 0, arg.GetType())
if rs.gamePid != nil {
rs.gamePid.Tell(arg)
}
case *pb.CBank:
arg := msg.(*pb.CBank)
glog.Debugf("CBank %#v", arg)
rs.bank(arg)
case *pb.CRank:
arg := msg.(*pb.CRank)
glog.Debugf("CRank %#v", arg)
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.CBankLog:
arg := msg.(*pb.CBankLog)
glog.Debugf("CBankLog %#v", arg)
arg.Userid = rs.User.GetUserid()
rs.dbmsPid.Request(arg, ctx.Self())
case *pb.TaskUpdate:
arg := msg.(*pb.TaskUpdate)
glog.Debugf("TaskUpdate %#v", arg)
rs.taskUpdate(arg)
case *pb.CTask:
arg := msg.(*pb.CTask)
glog.Debugf("CTask %#v", arg)
rs.task()
case *pb.LuckyUpdate:
arg := msg.(*pb.LuckyUpdate)
glog.Debugf("LuckyUpdate %#v", arg)
rs.luckyUpdate(arg)
case *pb.CLucky:
arg := msg.(*pb.CLucky)
glog.Debugf("CLucky %#v", arg)
rs.lucky()
case *pb.CTaskPrize:
arg := msg.(*pb.CTaskPrize)
glog.Debugf("CTaskPrize %#v", arg)
rs.taskPrize(arg.Type)
case *pb.CLoginPrize:
arg := msg.(*pb.CLoginPrize)
glog.Debugf("CLoginPrize %#v", arg)
rs.loginPrize(arg)
case *pb.CSignature:
arg := msg.(*pb.CSignature)
glog.Debugf("CSignature %#v", arg)
rs.setSign(arg)
case *pb.CLatLng:
arg := msg.(*pb.CLatLng)
glog.Debugf("CLatLng %#v", arg)
rs.setLatLng(arg)
case *pb.CRoomRecord:
arg := msg.(*pb.CRoomRecord)
glog.Debugf("CRoomRecord %#v", arg)
msg1 := &pb.GetRoomRecord{
Gtype: arg.Gtype,
Page: arg.Page,
Userid: rs.User.GetUserid(),
}
rs.dbmsPid.Request(msg1, ctx.Self())
case *pb.CUserData:
arg := msg.(*pb.CUserData)
glog.Debugf("CUserData %#v", arg)
userid := arg.GetUserid()
if userid == "" {
userid = rs.User.GetUserid()
}
if userid != rs.User.GetUserid() {
msg1 := new(pb.GetUserData)
msg1.Userid = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin | rs.joinActivity(arg, ctx)
case *pb.CGetCurrency:
arg := msg.(*pb.CGetCurrency)
glog.Debugf("CGetCurrency %#v", arg)
//响应 | random_line_split |
|
rs_handler_user.go | id = userid
rs.rolePid.Request(msg1, ctx.Self())
} else {
//TODO 添加房间数据返回
rsp := handler.GetUserDataMsg(arg, rs.User)
if rs.gamePid != nil {
rsp.Game = true
}
if rs.BankPhone != "" {
rsp.Bank = true
}
rs.Send(rsp)
}
case *pb.GotUserData:
arg := msg.(*pb.GotUserData)
glog.Debugf("GotUserData %#v", arg)
rsp := handler.UserDataMsg(arg)
rs.Send(rsp)
default:
//glog.Errorf("unknown message %v", msg)
rs.handlerPay(msg, ctx)
}
}
/*
func (rs *RoleActor) addPrize(rtype, ltype, amount int32) {
switch uint32(rtype) {
case data.DIAMOND:
rs.addCurrency(amount, 0, 0, 0, ltype)
case data.COIN:
rs.addCurrency(0, amount, 0, 0, ltype)
case data.CARD:
rs.addCurrency(0, 0, amount, 0, ltype)
case data.CHIP:
rs.addCurrency(0, 0, 0, amount, ltype)
}
}
//消耗钻石
func (rs *RoleActor) expend(cost uint32, ltype int32) {
diamond := -1 * int64(cost)
rs.addCurrency(diamond, 0, 0, 0, ltype)
}
*/
//奖励发放
func (rs *RoleActor) addCurrency(diamond, coin, card, chip int64, ltype int32) {
if rs.User == nil {
glog.Errorf("add currency user err: %d", ltype)
return
}
//日志记录
if diamond < 0 && ((rs.User.GetDiamond() + diamond) < 0) {
diamond = 0 - rs.User.GetDiamond()
}
if chip < 0 && ((rs.User.GetChip() + chip) < 0) {
chip = 0 - rs.User.GetChip()
}
if coin < 0 && ((rs.User.GetCoin() + coin) < 0) {
coin = 0 - rs.User.GetCoin()
}
if card < 0 && ((rs.User.GetCard() + card) < 0) {
card = 0 - rs.User.GetCard()
}
rs.User.AddCurrency(diamond, coin, card, chip)
//货币变更及时同步
msg2 := handler.ChangeCurrencyMsg(diamond, coin,
card, chip, ltype, rs.User.GetUserid())
rs.rolePid.Tell(msg2)
//消息
msg := handler.PushCurrencyMsg(diamond, coin,
card, chip, ltype)
rs.Send(msg)
//TODO 机器人不写日志
//if rs.User.GetRobot() {
// return
//}
//rs.status = true
//日志
//TODO 日志放在dbms中统一写入
//if diamond != 0 {
// msg1 := handler.LogDiamondMsg(diamond, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if coin != 0 {
// msg1 := handler.LogCoinMsg(coin, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if card != 0 {
// msg1 := handler.LogCardMsg(card, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
//if chip != 0 {
// msg1 := handler.LogChipMsg(chip, ltype, rs.User)
// rs.loggerPid.Tell(msg1)
//}
}
//同步数据
func (rs *RoleActor) syncUser() {
if rs.User == nil {
return
}
if rs.rolePid == nil {
return
}
if !rs.status { //有变更才同步
return
}
rs.status = false
msg := new(pb.SyncUser)
msg.Userid = rs.User.GetUserid()
glog.Debugf("syscUser %#v", rs.User)
result, err := json.Marshal(rs.User)
if err != nil {
glog.Errorf("user %s Marshal err %v", rs.User.GetUserid(), err)
return
}
msg.Data = result
rs.rolePid.Tell(msg)
}
//'银行
//银行发放
func (rs *RoleActor) addBank(coin int64, ltype int32, from string) {
if rs.User == nil {
glog.Errorf("add addBank user err: %d", ltype)
return
}
//日志记录
if coin < 0 && ((rs.User.GetBank() + coin) < 0) {
coin = 0 - rs.User.GetBank()
}
rs.User.AddBank(coin)
//银行变动及时同步
msg2 := handler.BankChangeMsg(coin,
ltype, rs.User.GetUserid(), from)
rs.rolePid.Tell(msg2)
}
//1存入,2取出,3赠送
func (rs *RoleActor) bank(arg *pb.CBank) {
msg := new(pb.SBank)
rtype := arg.GetRtype()
amount := int64(arg.GetAmount())
userid := arg.GetUserid()
coin := rs.User.GetCoin()
switch rtype {
case pb.BankDeposit: //存入
if rs.User.BankPhone = | rs.addCurrency(0, amount, 0, 0, int32(pb.LOG_TYPE13))
rs.addBank(-1*amount, int32(pb.LOG_TYPE13), "")
}
case pb.BankGift: //赠送
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
//} else if amount > rs.User.GetBank() {
} else if amount > rs.User.GetCoin() { //修改成赠送bank外面的
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY {
msg.Error = pb.GiveNumberError
} else if userid == "" {
msg.Error = pb.GiveUseridError
} else {
msg1 := handler.GiveBankMsg(amount, int32(pb.LOG_TYPE15), userid, rs.User.GetUserid())
if rs.bank2give(msg1) {
//rs.addBank(-1*amount, int32(pb.LOG_TYPE15), userid)
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE15))
//充值消息提醒
record1, msg1 := handler.GiveNotice(amount, rs.User.GetUserid(), userid)
if record1 != nil {
rs.loggerPid.Tell(record1)
}
rs.Send(msg1)
} else {
msg.Error = pb.GiveUseridError
}
}
case pb.BankSelect: //查询
msg.Phone = rs.User.BankPhone
case pb.BankOpen: //开通
if rs.User.BankPhone != "" {
msg.Error = pb.BankAlreadyOpen
} else if !utils.PhoneValidate(arg.GetPhone()) {
msg.Error = pb.PhoneNumberError
} else if len(arg.GetPassword()) != 32 {
msg.Error = pb.PwdError
} else if len(arg.GetSmscode()) != 6 {
msg.Error = pb.SmsCodeWrong
} else {
msg.Error = rs.bankCheck(arg)
if msg.Error == pb.OK {
//奖励发放
rs.addCurrency(0, 666, 0, 0, int32(pb.LOG_TYPE56))
//消息提醒
record, msg2 := handler.BankOpenNotice(666, rs.User.GetUserid())
if record != nil {
rs.loggerPid.Tell(record)
}
if msg2 != nil {
rs.Send(msg2)
}
}
}
case pb.BankResetPwd: //重置密码
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if rs.User.BankPhone != arg.GetPhone | = "" {
msg.Error = pb.BankNotOpen
} else if (coin - amount) < data.BANKRUPT {
msg.Error = pb.NotEnoughCoin
} else if amount <= 0 {
msg.Error = pb.DepositNumberError
} else {
rs.addCurrency(0, -1*amount, 0, 0, int32(pb.LOG_TYPE12))
rs.addBank(amount, int32(pb.LOG_TYPE12), "")
}
case pb.BankDraw: //取出
if rs.User.BankPhone == "" {
msg.Error = pb.BankNotOpen
} else if arg.GetPassword() != rs.User.BankPassword {
msg.Error = pb.PwdError
} else if amount > rs.User.GetBank() {
msg.Error = pb.NotEnoughCoin
} else if amount < data.DRAW_MONEY_LOW {
msg.Error = pb.DrawMoneyNumberError
} else { | identifier_body |
ppo_trainer.py | po_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
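# Note on the RESUME branch above: only the visual, goal and action encoders are
# warm-started from the hard-coded checkpoint 'data/models/smt_with_pose/ckpt.400.pth';
# the policy and value heads keep their fresh initialization. Treat that path as a
# baked-in assumption of this trainer rather than something driven by the config.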
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
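# Illustrative sketch (key names are hypothetical): search_dict keeps checkpoint
# entries whose key contains encoder_name and strips the first three dot-separated
# components, so
#   "actor_critic.net.visual_encoder.cnn.0.weight"  ->  "cnn.0.weight"
# which is the form expected by visual_encoder.load_state_dict() above.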
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
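# Usage sketch (assumed call pattern, matching how try_to_resume_checkpoint parses
# "ckpt.<id>.pth" and reads extra_state["step"]):
#   self.save_checkpoint(f"ckpt.{count_checkpoints}.pth", dict(step=count_steps))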
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
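# Usage sketch (path and map_location are illustrative; extra args go straight to torch.load):
#   ckpt_dict = self.load_checkpoint("data/checkpoints/ckpt.10.pth", map_location="cpu")
#   self.agent.load_state_dict(ckpt_dict["state_dict"])
#   count_steps = ckpt_dict["extra_state"]["step"]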
def try_to_resume_checkpoint(self):
    checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
    if len(checkpoints) == 0:
        count_steps = 0
        count_checkpoints = 0
        start_update = 0
    else:
        last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
        checkpoint_path = last_ckpt
        # Restore checkpoints to models
        ckpt_dict = self.load_checkpoint(checkpoint_path)
        self.agent.load_state_dict(ckpt_dict["state_dict"])
        ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
        count_steps = ckpt_dict["extra_state"]["step"]
        count_checkpoints = ckpt_id + 1
        start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
        print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
    return count_steps, count_checkpoints, start_update
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
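# Example (metric names are illustrative only):
#   info = {"spl": 0.7, "collisions": {"count": 2, "is_collision": 0}, "top_down_map": {...}}
# returns {"spl": 0.7, "collisions.count": 2.0}: blacklisted keys are dropped, nested
# dicts are flattened with a "." separator, and scalar values are cast to float.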
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
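# Sketch: with two envs reporting {"spl": 1.0} and {"spl": 0.5}, this returns
# {"spl": [1.0, 0.5]}, i.e. one list entry per environment, in env order.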
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
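# One rollout step per call: sample an action for every env from the current policy,
# step the vectorized envs, fold rewards/metrics into running_episode_stats (the
# (1 - masks) factor means sums are only flushed when an episode finishes), and push
# the transition into RolloutStorage. Returns timings plus the number of envs so the
# caller can count collected frames.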
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
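# After bootstrapping the value of the final observation, compute_returns uses GAE
# when ppo_cfg.use_gae is set (roughly delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),
# A_t = delta_t + gamma * tau * A_{t+1}), then the clipped-surrogate PPO update runs
# for ppo_epoch passes over num_mini_batch minibatches.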
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
ppo_trainer.py | _rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update
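# Assumes checkpoints are named "ckpt.<id>.pth" (the id is taken from the second
# dot-separated field) and that exactly one checkpoint is written every
# CHECKPOINT_INTERVAL updates, which is how start_update is reconstructed here.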
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict( | random_line_split |
||
ppo_trainer.py | po_cfg: config node with relevant params
Returns:
None
"""
logger.add_filehandler(self.config.LOG_FILE)
if observation_space is None:
observation_space = self.envs.observation_spaces[0]
self.actor_critic = AudioNavBaselinePolicy(
observation_space=observation_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=self.config.TASK_CONFIG.TASK.GOAL_SENSOR_UUID,
extra_rgb=self.config.EXTRA_RGB
)
self.agent = PPO(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
)
if self.config.RESUME:
ckpt_dict = self.load_checkpoint('data/models/smt_with_pose/ckpt.400.pth', map_location="cpu")
self.agent.actor_critic.net.visual_encoder.load_state_dict(self.search_dict(ckpt_dict, 'visual_encoder'))
self.agent.actor_critic.net.goal_encoder.load_state_dict(self.search_dict(ckpt_dict, 'goal_encoder'))
self.agent.actor_critic.net.action_encoder.load_state_dict(self.search_dict(ckpt_dict, 'action_encoder'))
self.actor_critic.to(self.device)
@staticmethod
def search_dict(ckpt_dict, encoder_name):
encoder_dict = {}
for key, value in ckpt_dict['state_dict'].items():
if encoder_name in key:
encoder_dict['.'.join(key.split('.')[3:])] = value
return encoder_dict
def save_checkpoint(
self, file_name: str, extra_state=None
) -> None:
checkpoint = {
"state_dict": self.agent.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
def try_to_resume_checkpoint(self):
checkpoints = glob.glob(f"{self.config.CHECKPOINT_FOLDER}/*.pth")
if len(checkpoints) == 0:
count_steps = 0
count_checkpoints = 0
start_update = 0
else:
last_ckpt = sorted(checkpoints, key=lambda x: int(x.split(".")[1]))[-1]
checkpoint_path = last_ckpt
# Restore checkpoints to models
ckpt_dict = self.load_checkpoint(checkpoint_path)
self.agent.load_state_dict(ckpt_dict["state_dict"])
ckpt_id = int(last_ckpt.split("/")[-1].split(".")[1])
count_steps = ckpt_dict["extra_state"]["step"]
count_checkpoints = ckpt_id + 1
start_update = ckpt_dict["config"].CHECKPOINT_INTERVAL * ckpt_id + 1
print(f"Resuming checkpoint {last_ckpt} at {count_steps} frames")
return count_steps, count_checkpoints, start_update
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def | (
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs | _extract_scalars_from_infos | identifier_name |
ppo_trainer.py | have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
pth_time = 0.0
env_time = 0.0
t_sample_action = time.time()
# sample actions
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
actions,
actions_log_probs,
recurrent_hidden_states
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
logging.debug('Reward: {}'.format(rewards[0]))
env_time += time.time() - t_step_env
t_update_stats = time.time()
batch = batch_obs(observations, device=self.device)
rewards = torch.tensor(rewards, dtype=torch.float, device=current_episode_reward.device)
rewards = rewards.unsqueeze(1)
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones], dtype=torch.float, device=current_episode_reward.device
)
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs,
values,
rewards.to(device=self.device),
masks.to(device=self.device),
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad():
last_observation = {
k: v[-1] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step]
).detach()
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
value_loss, action_loss, dist_entropy = self.agent.update(rollouts)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
)
def train(self) -> None:
r"""Main method for training PPO.
Returns:
None
"""
logger.info(f"config: {self.config}")
random.seed(self.config.SEED)
np.random.seed(self.config.SEED)
torch.manual_seed(self.config.SEED)
# add_signal_handlers()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True
)
ppo_cfg = self.config.RL.PPO
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if not os.path.isdir(self.config.CHECKPOINT_FOLDER):
os.makedirs(self.config.CHECKPOINT_FOLDER)
self._setup_actor_critic_agent(ppo_cfg)
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.parameters())
)
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.envs.observation_spaces[0],
self.envs.action_spaces[0],
ppo_cfg.hidden_size
)
rollouts.to(self.device)
observations = self.envs.reset()
batch = batch_obs(observations)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
current_episode_reward = torch.zeros(self.envs.num_envs, 1)
running_episode_stats = dict(
count=torch.zeros(self.envs.num_envs, 1),
reward=torch.zeros(self.envs.num_envs, 1),
)
window_episode_stats = defaultdict(
lambda: deque(maxlen=ppo_cfg.reward_window_size)
)
t_start = time.time()
env_time = 0
pth_time = 0
count_steps = 0
count_checkpoints = 0
start_update = 0
prev_time = 0
lr_scheduler = LambdaLR(
optimizer=self.agent.optimizer,
lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
)
interrupted_state = load_interrupted_state(model_dir=self.config.MODEL_DIR)
if interrupted_state is not None:
self.agent.load_state_dict(interrupted_state["state_dict"])
self.agent.optimizer.load_state_dict(
interrupted_state["optimizer_state"]
)
lr_scheduler.load_state_dict(interrupted_state["lr_scheduler_state"])
requeue_stats = interrupted_state["requeue_stats"]
env_time = requeue_stats["env_time"]
pth_time = requeue_stats["pth_time"]
count_steps = requeue_stats["count_steps"]
count_checkpoints = requeue_stats["count_checkpoints"]
start_update = requeue_stats["start_update"]
prev_time = requeue_stats["prev_time"]
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
for update in range(start_update, self.config.NUM_UPDATES):
if ppo_cfg.use_linear_lr_decay:
lr_scheduler.step()
if ppo_cfg.use_linear_clip_decay:
self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
update, self.config.NUM_UPDATES
)
if EXIT.is_set():
self.envs.close()
if REQUEUE.is_set():
requeue_stats = dict(
env_time=env_time,
pth_time=pth_time,
count_steps=count_steps,
count_checkpoints=count_checkpoints,
start_update=update,
prev_time=(time.time() - t_start) + prev_time,
)
save_interrupted_state(
dict(
state_dict=self.agent.state_dict(),
optimizer_state=self.agent.optimizer.state_dict(),
lr_scheduler_state=lr_scheduler.state_dict(),
config=self.config,
requeue_stats=requeue_stats,
),
model_dir=self.config.MODEL_DIR
)
requeue_job()
return
for step in range(ppo_cfg.num_steps):
delta_pth_time, delta_env_time, delta_steps = self._collect_rollout_step(
rollouts,
current_episode_reward,
running_episode_stats
)
pth_time += delta_pth_time
env_time += delta_env_time
count_steps += delta_steps
delta_pth_time, value_loss, action_loss, dist_entropy = self._update_agent(
ppo_cfg, rollouts
)
pth_time += delta_pth_time
deltas = {
k: (
(v[-1] - v[0]).sum().item()
if len(v) > 1
else v[0].sum().item()
)
for k, v in window_episode_stats.items()
}
deltas["count"] = max(deltas["count"], 1.0)
writer.add_scalar(
"Metrics/reward", deltas["reward"] / deltas["count"], count_steps
)
# Check to see if there are any metrics
# that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"reward", "count"}
}
if len(metrics) > 0:
# writer.add_scalars("metrics", metrics, count_steps)
| for metric, value in metrics.items():
writer.add_scalar(f"Metrics/{metric}", value, count_steps) | conditional_block |
|
imaging-multibeam.py | go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch) |
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms | limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values()) | random_line_split |
imaging-multibeam.py | go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
|
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms | target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl) | identifier_body |
imaging-multibeam.py | go to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def | (target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
shutil.copy(logfile, target_info["output_dir"])
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms | combine_ms | identifier_name |
imaging-multibeam.py | to scratch space on the node.
scratch = os.getenv("TMPDIR")
if __name__ == "__main__":
# Our single command line argument is a parset containing all
# configuration information we'll need.
input_parset = lofar.parameterset.parameterset(sys.argv[1])
# We require `sbs_per_beam` input MeasurementSets for each beam, including
# the calibrator.
sbs_per_beam = sum(input_parset.getIntVector("band_size"))
print "Locating calibrator data and checking paths"
ms_cal = {}
ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
assert(len(ms_cal["datafiles"]) == sbs_per_beam)
ms_cal["output_dir"] = os.path.join(
input_parset.getString("output_dir"),
"calibrator",
input_parset.getString("cal_obsid")
)
make_directory(ms_cal["output_dir"])
print "Copying calibrator subbands to output"
ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])
print "Locating target data and checking paths"
# ms_target will be a dict that provides all the information we need to
# process each independent element of the observation, where an "element"
# is a combination of a beam (SAP) and a band (number of subbands)
ms_target = {}
target_mss = read_ms_list(input_parset.getString("target_ms_list"))
assert(len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)
for beam, data in enumerate(zip(*[iter(target_mss)]*sbs_per_beam)):
start_sb = 0
for band, band_size in enumerate(input_parset.getIntVector("band_size")):
target_info = {}
target_info['datafiles'] = target_mss[start_sb:start_sb+band_size]
target_info['calfiles' ] = ms_cal["datafiles"][start_sb:start_sb+band_size]
assert(len(target_info['datafiles']) == len(target_info['calfiles']))
target_info['output_dir'] = os.path.join(
input_parset.getString("output_dir"),
"target",
input_parset.getString("target_obsid"),
"SAP00%d" % (beam,)
)
make_directory(target_info["output_dir"])
target_info["output_ms"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.MS" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_ms"]))
target_info["output_im"] = os.path.join(target_info["output_dir"], "%s_SAP00%d_band%d.img" % (input_parset.getString("target_obsid"), beam, band))
assert(not os.path.exists(target_info["output_im"]))
pointing = map(math.degrees, table("%s::FIELD" % target_info["datafiles"][0]).getcol("REFERENCE_DIR")[0][0])
target_info["skymodel"] = os.path.join(
input_parset.getString("skymodel_dir"),
"%.2f_%.2f.skymodel" % (pointing[0], pointing[1])
)
assert(os.path.exists(target_info["skymodel"]))
ms_target["SAP00%d_band%d" % (beam, band)] = target_info
start_sb += band_size
# Copy to working directories
for name in ms_target.iterkeys():
print "Copying %s to scratch area" % (name,)
ms_target[name]["datafiles"] = copy_to_work_area(
ms_target[name]["datafiles"], scratch
)
# We'll run as many simultaneous jobs as we have CPUs
pool = ThreadPool(cpu_count())
# Calibration of each calibrator subband
os.chdir(ms_cal['output_dir']) # Logs will get dumped here
clear_calibrate_stand_alone_logs()
calcal_parset = get_parset_subset(input_parset, "calcal.parset", scratch)
def calibrate_calibrator(cal):
source = table("%s::OBSERVATION" % (cal,)).getcol("LOFAR_TARGET")['array'][0].lower().replace(' ', '')
skymodel = os.path.join(
input_parset.getString("skymodel_dir"),
"%s.skymodel" % (source,)
)
print "Calibrating %s with skymodel %s" % (cal, skymodel)
run_calibrate_standalone(calcal_parset, cal, skymodel, replace_parmdb=True, replace_sourcedb=True)
with time_code("Calibration of calibrator"):
pool.map(calibrate_calibrator, ms_cal["datafiles"])
# Clip calibrator parmdbs
def clip_parmdb(sb):
run_process(
input_parset.getString("pdbclip.executable"),
"--auto",
"--sigma=%f" % (input_parset.getFloat("pdbclip.sigma"),),
os.path.join(sb, "instrument")
)
with time_code("Clip calibrator instrument databases"):
pool.map(lambda sb: clip_parmdb(sb), ms_cal["datafiles"])
# Transfer calibration solutions to targets
transfer_parset = get_parset_subset(input_parset, "transfer.parset", scratch)
transfer_skymodel = input_parset.getString("transfer.skymodel")
clear_calibrate_stand_alone_logs()
def transfer_calibration(ms_pair):
cal, target = ms_pair
print "Transferring solution from %s to %s" % (cal, target)
parmdb_name = mkdtemp(dir=scratch)
run_process("parmexportcal", "in=%s/instrument/" % (cal,), "out=%s" % (parmdb_name,))
run_process("calibrate-stand-alone", "--parmdb", parmdb_name, target, transfer_parset, transfer_skymodel)
with time_code("Transfer of calibration solutions"):
for target in ms_target.itervalues():
pool.map(transfer_calibration, zip(target["calfiles"], target["datafiles"]))
# Combine with NDPPP
def combine_ms(target_info):
output = os.path.join(mkdtemp(dir=scratch), "combined.MS")
run_ndppp(
get_parset_subset(input_parset, "combine.parset", scratch),
{
"msin": str(target_info["datafiles"]),
"msout": output
}
)
target_info["combined_ms"] = output
with time_code("Combining target subbands"):
pool.map(combine_ms, ms_target.values())
# Phase only calibration of combined target subbands
print "Running phase only calibration"
def phaseonly(target_info):
# We chdir to the scratch directory initially, so that logs get dumped
# there, then we'll copy the logs to the output directory when we're
# done.
try:
os.chdir(os.path.dirname(target_info["combined_ms"]))
run_calibrate_standalone(
get_parset_subset(input_parset, "phaseonly.parset", scratch),
target_info["combined_ms"],
target_info["skymodel"]
)
for logfile in glob.glob(
os.path.join(
os.path.dirname(target_info["combined_ms"]),
"*log"
)
):
|
except Exception, e:
print "Error in phaseonly with %s" % (target_info["combined_ms"])
print str(e)
raise
# Most Lisa nodes have 24 GB RAM -- we don't want to run out
calpool = ThreadPool(6)
with time_code("Phase-only calibration"):
calpool.map(phaseonly, ms_target.values())
# Strip bad stations.
# Note that the combined, calibrated, stripped MS is one of our output
# data products, so we save that with the name specified in the parset.
def strip_bad_stations(target_info):
bad_stations = find_bad_stations(target_info["combined_ms"], scratch)
strip_stations(target_info["combined_ms"], target_info["output_ms"], bad_stations)
with time_code("Strip bad stations"):
pool.map(strip_bad_stations, ms_target.values())
# Limit the length of the baselines we're using.
# We'll image a reference table using only the short baselines.
maxbl = input_parset.getFloat("limit.max_baseline")
def limit_bl(target_info):
target_info["bl_limit_ms"] = mkdtemp(dir=scratch)
limit_baselines(target_info["output_ms"], target_info["bl_limit_ms"], maxbl)
with time_code("Limiting maximum baseline length"):
pool.map(limit_bl, ms_target.values())
# We source a special build for using the "new" awimager
awim_init = input_parset.getString("awimager.initscript")
# Calculate the threshold for cleaning based on the noise in a dirty map
# We don't use our threadpool here, since awimager is parallelized
noise_parset_name = get_parset_subset(input_parset, "noise.parset", scratch)
with time_code("Calculating threshold for cleaning"):
for target_info in ms_target.values():
print "Getting threshold for %s" % target_info["output_ms"]
target_info["threshold"] = input_parset.getFloat("noise.multiplier") * estimate_noise(
target_info["bl_limit_ms | shutil.copy(logfile, target_info["output_dir"]) | conditional_block |
bressan_computerscience.py |
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with the _errors="coerce"_ option, any value that cannot be converted is set to NaT (treated as missing).
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]  # "at least twice" means two or more funded loans
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan pair the corresponding loan's details. So, I need to join the 2 datasets. To avoid running out of RAM, I reduce the number of columns selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
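"""Note that the sum above attributes the full loan amount to every lender of a loan. The task asks to assume that, when a loan has more than one lender, each lender contributes the same amount; a minimal sketch of that equal-split variant (the _lender_share_ column name is an illustrative assumption, not part of the original notebook):"""
# Number of lenders per loan, repeated on every (lender, loan) row
n_lenders = lender_loan_details.groupby('loan_id')['lenders'].transform('count')
# Each lender's equal share of the loan amount
lender_loan_details['lender_share'] = lender_loan_details['loan_amount'] / n_lenders
lender_loan_details.groupby('lenders')['lender_share'].agg('sum').reset_index(name='overall_money_lent')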
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ datasets by lender name, removing lenders without an associated country code
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
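"""One possible way (not in the original notebook) to apply the hint explicitly — i.e. assume the true lender-country distribution equals the one observed on rows with a known lender country — is to rescale the known-country lent amounts so that they sum to the overall amount lent:"""
# Share of lending per country, computed only on lenders with a known country
known_share = lender_loan_country_group_join['amount_lent'] / lender_loan_country_group_join['amount_lent'].sum()
# Hypothetical rescaled column: spread the overall lent amount using those shares
lender_loan_country_group_join['amount_lent_scaled'] = known_share * loans_import['loan_amount'].sum()
lender_loan_country_group_join.head(5)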
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
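"""To actually answer the question, one could sort by the ratio and take the top row (a one-line sketch):"""
lender_loan_country_group_stats1.sort_values('population_ratio', ascending=False).head(1)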
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
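"""As above, the country with the highest ratio can be picked by sorting (sketch):"""
lender_loan_country_group_stats2.sort_values('population_weighed_ratio', ascending=False).head(1)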
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start defining a function that, given needed information (start date, end date and value) split it by years.
"""
def | divide_value_by_period | identifier_name |
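"""The original definition is truncated here; below is a minimal, self-contained sketch of the proportional split described above (the function and parameter names are illustrative assumptions, not the author's original code):"""
def split_amount_by_year(start, end, amount):
    # Days the loan spends in each calendar year, endpoints included
    # (this reproduces the worked example: 31 + 365 + 30 days)
    days_per_year = pd.Series(pd.date_range(start, end, freq='D').year).value_counts().sort_index()
    return (amount * days_per_year / days_per_year.sum()).to_dict()

# Example from the text: ~363.85 for 2016, ~4284.04 for 2017, ~352.11 for 2018
split_amount_by_year('2016-12-01', '2018-01-30', 5000)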
|
bressan_computerscience.py | (loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]  # "at least twice" means two or more funded loans
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of variables selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
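# The question states that every lender of a loan is assumed to contribute the same amount,
# so a stricter reading splits each loan among its lenders before summing (a sketch built on
# the lender_loan_details merge above).
lender_loan_split = lender_loan_details.copy()
lender_loan_split['lenders_per_loan'] = lender_loan_split.groupby('loan_id')['lenders'].transform('count')
lender_loan_split['lender_share'] = lender_loan_split['loan_amount'] / lender_loan_split['lenders_per_loan']
lender_loan_split.groupby('lenders')['lender_share'].agg('sum').reset_index(name='overall_money_lent')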
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ dataset by lender name, removing lenders without a country code associated
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits it by years.
"""
def divide_value_by_period(row):
| start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp | identifier_body |
|
bressan_computerscience.py | .csv'
loans_import = pd.read_csv(loans_url)
loans_import.dtypes
loans_import.head(2)
lenders_url = '/content/drive/My Drive/additional-kiva-snapshot/lenders.csv'
lenders_import = pd.read_csv(lenders_url)
lenders_import.dtypes
lenders_import.head(5)
country_stats_url = '/content/drive/My Drive/additional-kiva-snapshot/country_stats.csv'
country_stats_import = pd.read_csv(country_stats_url)
country_stats_import.dtypes
country_stats_import.head(5)
"""## Questions
####1. **Normalize the loan_lenders table. In the normalized table, each row must have one loan_id and one lender.**
First of all, I cast the _lenders_ variable as an array
"""
loans_lenders_import['lenders'] = loans_lenders_import.lenders.apply(lambda x: x.split(','))
loans_lenders_import.head(2)
"""Then, I can explode _lenders_ variable.
Please note: ".drop_duplicates()" is used to avoid duplicated lenders for load_in, if present in the original _lenders_ array
"""
loans_lenders = loans_lenders_import.explode('lenders').drop_duplicates()
loans_lenders.head(5)
"""####2. **For each loan, add a column duration corresponding to the number of days between the disburse time and the planned expiration time. If any of those two dates is missing, also the duration must be missing.**
I calculate _duration_ on the _loans_ dataframe, converting needed columns to datetime.
Please note: with the _errors="coerce"_ option, the system will set to NaT all values that cannot be converted.
"""
loans_import['planned_expiration_time']= pd.to_datetime(loans_import['planned_expiration_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['disburse_time']= pd.to_datetime(loans_import['disburse_time'], format="%Y-%m-%d %H:%M:%S", errors="coerce")
loans_import['duration'] = loans_import['planned_expiration_time'] - loans_import['disburse_time']
loans_import.head(5)
"""####3. **Find the lenders that have funded at least twice.**"""
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]  # "at least twice" means two or more fundings
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of variables selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ dataset by lender name, removing lenders without a country code associated
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code'] |
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
|
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5) | random_line_split |
bressan_computerscience.py | """
lender_foundings = loans_lenders.groupby('lenders').size().reset_index(name='foundings')
lender_foundings[lender_foundings['foundings'] >= 2]  # "at least twice" means two or more fundings
"""####4. **For each country, compute how many loans have involved that country as borrowers.**"""
country_loans = loans_import.groupby('country_code').size().reset_index(name='loans')
country_loans.head(10)
"""####5. **For each country, compute the overall amount of money borrowed.**"""
country_loans_amount = loans_import.groupby('country_code')['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_loans_amount.head(5)
"""####6. **Like the previous point, but expressed as a percentage of the overall amount lent.**"""
country_loans_amount['overall_founds_perc'] = country_loans_amount.overall_founds / country_loans_amount.overall_founds.sum()
country_loans_amount.head(5)
"""####7. **Like the three previous points, but split for each year (with respect to disburse time).**"""
loans_import['disburse_year'] = pd.DatetimeIndex(loans_import['disburse_time']).year
country_year_loans = loans_import.groupby(['country_code','disburse_year']).size().reset_index(name='loans')
country_year_loans_amount = loans_import.groupby(['country_code','disburse_year'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
country_year_loans_amount['overall_founds_perc'] = country_year_loans_amount.overall_founds / country_year_loans_amount.overall_founds.sum()
country_year_loans.head(5)
country_year_loans_amount.head(5)
"""####8. **For each lender, compute the overall amount of money lent. For each loan that has more than one lender, you must assume that all lenders contributed the same amount.**
First of all, I need to assign to each lender/loan the corresponding loan's details. So, I need to join the two datasets. To avoid running out of RAM, I reduce the number of variables selected from _loans_import_
"""
lender_loan_details = pd.merge(
loans_lenders,
loans_import[['loan_id','loan_amount']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_details.head(5)
"""Then, it's possible to group the dataset to obtain the overall amount of money lent"""
lender_loan_details.groupby('lenders')['loan_amount'].agg('sum').reset_index(name='overall_money_lent')
"""####9. **For each country, compute the difference between the overall amount of money lent and the overall amount of money borrowed. Since the country of the lender is often unknown, you can assume that the true distribution among the countries is the same as the one computed from the rows where the country is known.**
First of all, I join the _lenders_ and the _loans_lenders_ dataset by lender name, removing lenders without a country code associated
"""
lenders_import_filtered = lenders_import[lenders_import.country_code.notnull()]
lender_loan_country = pd.merge(
loans_lenders,
lenders_import_filtered[['permanent_name','country_code']],
left_on= ['lenders'],
right_on= ['permanent_name'],
how = 'inner')
lender_loan_country['lender_country'] = lender_loan_country['country_code']
lender_loan_country = lender_loan_country[['loan_id', 'lender_country']]
lender_loan_country.head(5)
"""Then, I join obtained dataset with the _loans_ dataset by loan ID"""
lender_loan_country_full = pd.merge(
lender_loan_country.drop_duplicates(),
loans_import[['loan_id','loan_amount','country_code']],
left_on= ['loan_id'],
right_on= ['loan_id'],
how = 'inner')
lender_loan_country_full['borrowed_country'] = lender_loan_country_full['country_code']
lender_loan_country_group = lender_loan_country_full.groupby(['lender_country','borrowed_country'])['loan_amount'].agg('sum').reset_index(name='overall_founds')
lender_loan_country_group.head(5)
"""Finally, I can group the obtained dataset by the 2 country columns to obtain requested information"""
lender_loan_country_group_borrowers = lender_loan_country_group.groupby(['borrowed_country'])['overall_founds'].agg('sum').reset_index(name='amount_borrowed')
lender_loan_country_group_lenders = lender_loan_country_group.groupby(['lender_country'])['overall_founds'].agg('sum').reset_index(name='amount_lent')
lender_loan_country_group_join = pd.merge(
lender_loan_country_group_borrowers,
lender_loan_country_group_lenders,
left_on= ['borrowed_country'],
right_on= ['lender_country'],
how = 'inner')
lender_loan_country_group_join['country'] = lender_loan_country_group_join['borrowed_country']
lender_loan_country_group_join = lender_loan_country_group_join[['country','amount_borrowed','amount_lent']]
lender_loan_country_group_join['lent_borrowed_ratio'] = lender_loan_country_group_join['amount_borrowed']/lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join['lent_borrowed_delta'] = lender_loan_country_group_join['amount_borrowed'] - lender_loan_country_group_join['amount_lent']
lender_loan_country_group_join.head(5)
"""####10. **Which country has the highest ratio between the difference computed at the previous point and the population?**
To evaluate this ratio, I have to join the previously created dataset with the _country_stats_ one
"""
lender_loan_country_group_stats = pd.merge(
lender_loan_country_group_join,
country_stats_import,
left_on= ['country'],
right_on= ['country_code'],
how = 'inner')
"""Then, I can compute the requested KPI"""
lender_loan_country_group_stats1 = lender_loan_country_group_stats
lender_loan_country_group_stats1['population_ratio'] = lender_loan_country_group_stats1['lent_borrowed_delta']/lender_loan_country_group_stats1['population']
lender_loan_country_group_stats1 = lender_loan_country_group_stats1[['country','lent_borrowed_delta','population_ratio']]
lender_loan_country_group_stats1.head(5)
"""####11. **Which country has the highest ratio between the difference computed at point 9 and the population that is not below the poverty line?**
To evaluate it, we have to multiply the overall population by the _population_below_poverty_line_ ratio
"""
lender_loan_country_group_stats2 = lender_loan_country_group_stats
lender_loan_country_group_stats2['population_weighed'] = lender_loan_country_group_stats2['population_below_poverty_line'] * lender_loan_country_group_stats2['population']
lender_loan_country_group_stats2['population_weighed_ratio'] = lender_loan_country_group_stats2['lent_borrowed_delta']/lender_loan_country_group_stats2['population_weighed']
lender_loan_country_group_stats2 = lender_loan_country_group_stats2[['country','lent_borrowed_delta','population_ratio', 'population_weighed_ratio']]
lender_loan_country_group_stats2.head(5)
"""####12. **For each year, compute the total amount of loans. Each loan that has planned expiration time and disburse time in different years must have its amount distributed proportionally to the number of days in each year. For example, a loan with disburse time December 1st, 2016, planned expiration time January 30th 2018, and amount 5000USD has an amount of 5000USD * 31 / (31+365+30) = 363.85 for 2016, 5000USD * 365 / (31+365+30) = 4284.04 for 2017, and 5000USD * 30 / (31+365+30) = 352.11 for 2018.**
Let's start by defining a function that, given the needed information (start date, end date and value), splits it by years.
"""
def divide_value_by_period(row):
start_date = row['disburse_time'].tz_localize(None)
end_date = row['planned_expiration_time'].tz_localize(None)
value = row['loan_amount']
# calculating the difference in years considering leap years
jumps = end_date.year - start_date.year
if jumps != 0:
| dayss = []
starting_year = start_date.year
for i in range(jumps):
next_year = starting_year + 1
next_year_comp = datetime(next_year, 1, 1)
# get the difference in days
diff = (next_year_comp - start_date).days
dayss.append(diff)
# re-assigning start and end dates
starting_year = next_year_comp.year
start_date = next_year_comp
# adding the days between the end date and the first day of the last year
dayss.append(((end_date - start_date).days) + 1)
# calculating the portion of value each period gets
if sum(dayss) > 0:
return [(x*value)/sum(dayss) for x in dayss] | conditional_block |
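# A quick check of the split against the worked example in the question text (a sketch; it
# assumes divide_value_by_period as defined above and tz-aware timestamps, as in the data).
example_row = pd.Series({
    'disburse_time': pd.Timestamp('2016-12-01', tz='UTC'),
    'planned_expiration_time': pd.Timestamp('2018-01-30', tz='UTC'),
    'loan_amount': 5000.0,
})
divide_value_by_period(example_row)  # roughly [363.85, 4284.04, 352.11]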
|
titanic-alpha-attempt.py | Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates vary *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
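# One way to encode the (child, man, woman) idea above (a sketch; the under-16 cutoff is an
# assumption taken from the age observations, and missing ages fall through to man/woman).
def person_type(row):
    if row['Age'] < 16:
        return 'child'
    return 'man' if row['Sex'] == 'male' else 'woman'
# left commented out so it does not change the feature set actually built below
# train_df['PersonType'] = train_df.apply(person_type, axis=1)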
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by family size
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by family size
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def | (dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2 | insert_age_group_values | identifier_name |
titanic-alpha-attempt.py | Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates vary *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
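# One way to encode the (child, man, woman) idea above (a sketch; the under-16 cutoff is an
# assumption taken from the age observations, and missing ages fall through to man/woman).
def person_type(row):
    if row['Age'] < 16:
        return 'child'
    return 'man' if row['Sex'] == 'male' else 'woman'
# left commented out so it does not change the feature set actually built below
# train_df['PersonType'] = train_df.apply(person_type, axis=1)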
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by family size
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by family size
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
|
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2 | na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females} | identifier_body |
titanic-alpha-attempt.py | Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most frequent value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forest
# - Artificial neural network
# - Relevance Vector Machine
# | # The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation. | random_line_split |
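# A minimal sketch of the cross-validation set-up described above (the feature/label names and
# the choice of classifier here are assumptions for illustration, not the notebook's final model).
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
# X = train_df.drop('Survived', axis=1)
# y = train_df['Survived']
# cross_val_score(DecisionTreeClassifier(), X, y, cv=10, scoring='accuracy').mean()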
|
titanic-alpha-attempt.py | Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates vary *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
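# One way to encode the (child, man, woman) idea above (a sketch; the under-16 cutoff is an
# assumption taken from the age observations, and missing ages fall through to man/woman).
def person_type(row):
    if row['Age'] < 16:
        return 'child'
    return 'man' if row['Sex'] == 'male' else 'woman'
# left commented out so it does not change the feature set actually built below
# train_df['PersonType'] = train_df.apply(person_type, axis=1)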
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by family size
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by family size
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
|
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2',' | relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq) | conditional_block |
contacts-details.component.ts | .log('run contacts')
this.searchKin(this.user);
}
}
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
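// Loads the next-of-kin/contact list for the bound profile. The endpoint differs for
// recipients and staff, and the first contact (if any) is auto-selected and displayed.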
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false;
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false;
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
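// Fetches the full record for the selected contact and patches it into kindetailsGroup,
// mapping the stored equipmentCode back onto the oni1/oni2 checkboxes.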
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
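// ControlValueAccessor wiring: the parent binds a ProfileInterface via ngModel/formControl;
// writeValue stores it and triggers a reload of the contact list for that profile.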
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
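// Persists edits to the selected contact. The staff and recipient branches call different
// endpoints but share the same suburb/postcode parsing and PERSON1/PERSON2 mapping.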
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
const sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
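// Split a combined "postcode suburb" string into its numeric and text parts.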
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
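// Step validation for the add-contact flow: step 0 requires a group, step 1 requires a type and a name.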
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 && (this.globalS.isEmpty(type) || this.globalS.isEmpty(name)) ) {
return false;
}
return true;
}
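// Create a new contact from inputForm, deriving postcode, suburb and state from the combined suburbcode value.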
add() {
if (this.inputForm.controls['suburbcode'].dirty) {
const rs = this.inputForm.get('suburbcode').value;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0].trim() : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].trim() : "";
let state = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[1].replace(/,/g, '').trim() : "";
if (pcode !== "") {
this.inputForm.controls["postcode"].setValue(pcode);
this.inputForm.controls["suburb"].setValue(suburb);
this.inputForm.controls["state"].setValue(state);
}
}
if (this.inputForm.get('oni1').value) {
this.inputForm.controls['ecode'].setValue('PERSON1')
} else if (this.inputForm.get('oni2').value) {
this.inputForm.controls['ecode'].setValue('PERSON2')
}
this.timeS.postcontactskinstaffdetails(
this.inputForm.value,
this.user.id
).pipe(takeUntil(this.unsubscribe)).subscribe(data => {
this.globalS.sToast('Success', 'Contact Inserted');
this.handleCancel();
this.searchKin(this.user);
});
}
delete() {
this.timeS.deletecontactskin(this.kindetailsGroup.value.recordNumber).subscribe(data => { | this.globalS.sToast('Success', 'Contact Deleted');
this.searchKin(this.user);
});
} | random_line_split |
|
contacts-details.component.ts | => {
};
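// Provided as NG_VALUE_ACCESSOR below so a parent form control can write the selected profile into this component.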
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges,ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes : Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
ngOnChanges(changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
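// When a doctor is picked, copy that doctor's contact details into inputForm; clear the fields if no match is found.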
doctorChangeEvent(data: any){
const doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2: '',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2: doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate() |
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if (!x) {
return EMPTY;
}
console.log(x);
return this.listS.gettypeother(x);
})
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false;
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false;
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
const sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
const sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == | {
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
} | identifier_body |