| file_name<br>large_string, lengths 4-140 | prefix<br>large_string, lengths 0-12.1k | suffix<br>large_string, lengths 0-12k | middle<br>large_string, lengths 0-7.51k | fim_type<br>large_string, 4 classes |
|---|---|---|---|---|
routex.go | pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) | (ctx rest.Context, identity model.Identity) {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
}
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId | UpdateIdentity | identifier_name |
routex.go | pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
}
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId) | if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token.UserId, | random_line_split |
|
routex.go | pubsub: pubsub.New(20),
quit: make(chan int),
}
go ret.tutorialGenerator()
return ret, nil
}
func (m RouteMap) UpdateIdentity(ctx rest.Context, identity model.Identity) |
func (m RouteMap) UpdateExfee(ctx rest.Context, invitations model.Invitation) {
var crossId int64
var action string
ctx.Bind("cross_id", &crossId)
ctx.Bind("action", &action)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if action != "join" && action != "remove" {
ctx.Return(http.StatusBadRequest, "invalid action: %s", action)
return
}
id := rmodel.Invitation{
Identity: invitations.Identity,
Notifications: invitations.Notifications,
Type: "invitation",
Action: action,
}
m.pubsub.Publish(m.publicName(crossId), id)
}
type UserCrossSetup struct {
SaveBreadcrumbs bool `json:"save_breadcrumbs,omitempty"`
AfterInSeconds int `json:"after_in_seconds,omitempty"`
}
func (m RouteMap) SetUser(ctx rest.Context, setup UserCrossSetup) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var crossId int64
ctx.Bind("cross_id", &crossId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
if setup.AfterInSeconds == 0 {
setup.AfterInSeconds = 60 * 60
}
m.switchWindow(crossId, token.Identity, setup.SaveBreadcrumbs, setup.AfterInSeconds)
}
func (m RouteMap) SearchRoutex(ctx rest.Context, crossIds []int64) {
ret, err := m.routexRepo.Search(crossIds)
if err != nil {
logger.ERROR("search for route failed: %s with %+v", err, crossIds)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
type RoutexInfo struct {
InWindow *bool `json:"in_window"`
Objects []rmodel.Geomark `json:"objects"`
}
func (m RouteMap) GetRoutex(ctx rest.Context) {
var userId, crossId int64
ctx.Bind("cross_id", &crossId)
ctx.Bind("user_id", &userId)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
endAt, err := m.breadcrumbsRepo.GetWindowEnd(userId, crossId)
if err != nil {
logger.ERROR("get user %d cross %d routex failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ret := RoutexInfo{}
if endAt != 0 {
ret.InWindow = new(bool)
*ret.InWindow = endAt >= time.Now().Unix()
}
query := make(url.Values)
query.Set("user_id", fmt.Sprintf("%d", userId))
cross, err := m.platform.FindCross(crossId, query)
if err == nil {
ret.Objects = m.getObjects(cross, true)
} else {
logger.ERROR("get user %d cross %d failed: %s", userId, crossId, err)
ctx.Return(http.StatusInternalServerError, err)
return
}
ctx.Render(ret)
}
func (m RouteMap) Stream(ctx rest.StreamContext) {
token, ok := m.auth(ctx)
if !ok {
ctx.Return(http.StatusUnauthorized, "invalid token")
return
}
var forceOpen bool
var coordinate string
ctx.Bind("force_window_open", &forceOpen)
ctx.Bind("coordinate", &coordinate)
if err := ctx.BindError(); err != nil {
ctx.Return(http.StatusBadRequest, err)
return
}
now := time.Now()
endAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || endAt <= now.Unix() {
if !forceOpen {
ctx.Return(http.StatusForbidden, "not in window")
return
}
after := 15 * 60
if endAt == 0 {
after = 60 * 60
}
var openAfter int
ctx.BindReset()
ctx.Bind("force_window_open", &openAfter)
if ctx.BindError() == nil {
after = openAfter
}
endAt = now.Unix() + int64(after)
m.switchWindow(int64(token.Cross.ID), token.Identity, true, after)
}
c := make(chan interface{}, 10)
m.pubsub.Subscribe(m.publicName(int64(token.Cross.ID)), c)
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
m.pubsub.Subscribe(m.tutorialName(), c)
}
for _, inv := range token.Cross.Exfee.Invitations {
m.pubsub.Subscribe(m.identityName(inv.Identity), c)
}
logger.DEBUG("streaming connected by user %d, cross %d", token.UserId, token.Cross.ID)
defer func() {
logger.DEBUG("streaming disconnect by user %d, cross %d", token.UserId, token.Cross.ID)
m.pubsub.UnsubscribeAll(c)
close(c)
}()
willEnd := endAt - now.Unix()
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{willEnd},
})
if err != nil {
return
}
toMars := coordinate == "mars"
isTutorial := false
if token.Cross.By.UserID == m.config.Routex.TutorialCreator {
isTutorial = true
}
hasCreated := false
ctx.Return(http.StatusOK)
quit := make(chan int)
defer func() { close(quit) }()
for _, mark := range m.getObjects(token.Cross, toMars) {
if isTutorial && !hasCreated && !mark.IsBreadcrumbs() {
hasCreated = true
}
if err := ctx.Render(mark); err != nil {
return
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
if err := ctx.Render(map[string]string{"type": "command", "action": "init_end"}); err != nil {
return
}
lastCheck := now.Unix()
for ctx.Ping() == nil {
select {
case d := <-c:
switch data := d.(type) {
case rmodel.Geomark:
if isTutorial && !hasCreated {
if data.Id == m.breadcrumbsId(token.UserId) {
locale, by := "", ""
for _, i := range token.Cross.Exfee.Invitations {
if i.Identity.UserID == token.UserId {
locale, by = i.Identity.Locale, i.Identity.Id()
break
}
}
tutorialMark, err := m.setTutorial(data.Positions[0].GPS[0], data.Positions[0].GPS[1], token.UserId, int64(token.Cross.ID), locale, by)
if err != nil {
logger.ERROR("create tutorial geomark for user %d in cross %d failed: %s", token.UserId, token.Cross.ID, err)
} else {
hasCreated = true
if toMars {
tutorialMark.ToMars(m.conversion)
}
err := ctx.Render(tutorialMark)
if err != nil {
return
}
}
}
}
if toMars {
data.ToMars(m.conversion)
}
d = data
case rmodel.Identity:
switch data.Action {
case "join":
if token.Cross.Exfee.Join(data.Identity) {
m.pubsub.Subscribe(m.identityName(data.Identity), c)
}
case "remove":
if token.Cross.Exfee.Remove(data.Identity) {
m.pubsub.Unsubscribe(m.identityName(data.Identity), c)
}
}
}
ctx.SetWriteDeadline(time.Now().Add(broker.NetworkTimeout))
err := ctx.Render(d)
if err != nil {
return
}
case <-time.After(broker.NetworkTimeout):
case <-time.After(time.Duration(endAt-time.Now().Unix()) * time.Second):
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil || newEndAt == 0 || newEndAt <= time.Now().Unix() {
return
}
endAt = newEndAt
err = ctx.Render(map[string]interface{}{
"type": "command",
"action": "close_after",
"args": []interface{}{endAt - time.Now().Unix()},
})
if err != nil {
return
}
}
if time.Now().Unix()-lastCheck > 60 {
lastCheck = time.Now().Unix()
newEndAt, err := m.breadcrumbsRepo.GetWindowEnd(token.UserId, int64(token.Cross.ID))
if err != nil {
logger.ERROR("can't set user %d cross %d: %s", token | {
id := rmodel.Identity{
Identity: identity,
Type: "identity",
Action: "update",
}
m.pubsub.Publish(m.identityName(identity), id)
} | identifier_body |
Checker_ruleunit_test.js | ",
"HAAC_Aria_ImgAlt": "1128",
"HAAC_BackgroundImg_HasTextOrTitle": "1132",
"HAAC_Accesskey_NeedLabel": "1140",
"HAAC_Aria_Or_HTML5_Attr": "1141",
"HAAC_Canvas": "1143",
"HAAC_Figure_label": "1144",
"HAAC_Input_Placeholder": "1145",
"HAAC_Aria_Native_Host_Sematics": "1146",
"RPT_Form_ChangeEmpty": "1147",
"IBMA_Color_Contrast_WCAG2AA": "1148",
"IBMA_Color_Contrast_WCAG2AA_PV": "1149",
"WCAG20_Body_FirstASkips_Native_Host_Sematics": "1150",
"WCAG20_Body_FirstAContainsSkipText_Native_Host_Sematics": "1151",
"Rpt_Aria_RequiredChildren_Native_Host_Sematics": "1152",
"Rpt_Aria_RequiredParent_Native_Host_Sematics": "1153",
"Rpt_Aria_EventHandlerMissingRole_Native_Host_Sematics": "1154",
"Rpt_Aria_WidgetLabels_Implicit": "1156",
"Rpt_Aria_OrphanedContent_Native_Host_Sematics": "1157",
"Rpt_Aria_RegionLabel_Implicit": "1158",
"Rpt_Aria_MultipleMainsVisibleLabel_Implicit": "1159",
"Rpt_Aria_MultipleBannerLandmarks_Implicit": "1160",
"Rpt_Aria_MultipleComplementaryLandmarks_Implicit": "1161",
"Rpt_Aria_MultipleContentinfoLandmarks_Implicit": "1162",
"Rpt_Aria_MultipleFormLandmarks_Implicit": "1163",
"Rpt_Aria_MultipleNavigationLandmarks_Implicit": "1164",
"Rpt_Aria_ComplementaryLandmarkLabel_Implicit": "1165",
"Rpt_Aria_MultipleArticleRoles_Implicit": "1166",
"Rpt_Aria_ArticleRoleLabel_Implicit": "1167",
"Rpt_Aria_MultipleGroupRoles_Implicit": "1168",
"Rpt_Aria_GroupRoleLabel_Implicit": "1169",
"Rpt_Aria_MultipleContentinfoInSiblingSet_Implicit": "1170",
"Rpt_Aria_OneBannerInSiblingSet_Implicit": "1172",
"Rpt_Aria_ContentinfoWithNoMain_Implicit": "1173",
"Rpt_Aria_ComplementaryRequiredLabel_Implicit": "1174",
"Rpt_Aria_MultipleRegionsUniqueLabel_Implicit": "1176",
"IBMA_Focus_Tabbable": "1177",
"IBMA_Focus_MultiTab": "1178",
"WCAG20_Table_SummaryAria3": "1179",
"RPT_Style_Trigger2": "1180",
"Rpt_Aria_MultipleMainsRequireLabel_Implicit_2": "1182",
"HAAC_Media_DocumentTrigger2": "1183",
"HAAC_Aria_ErrorMessage": "1184",
"HAAC_List_Group_ListItem": "1185",
"HAAC_ActiveDescendantCheck": "1186",
"HAAC_Application_Role_Text": "1187",
"Rpt_Aria_MultipleToolbarUniqueLabel": "1188",
"HAAC_Combobox_ARIA_11_Guideline": "1193",
"HAAC_Combobox_Must_Have_Text_Input": "1194",
"HAAC_Combobox_DOM_Focus": "1195",
"HAAC_Combobox_Autocomplete": "1196",
"HAAC_Combobox_Autocomplete_Invalid": "1197",
"HAAC_Combobox_Expanded": "1198",
"HAAC_Combobox_Popup": "1199",
"WCAG21_Style_Viewport": "1200",
"WCAG21_Label_Accessible": "1202",
"WCAG21_Input_Autocomplete": "1203",
"WCAG20_Input_VisibleLabel": "1204"
}
let mapGToRule = {}
for (const key in mapRuleToG) {
mapGToRule[mapRuleToG[key]] = key;
}
// Describe this suite of test cases; describe is a test suite and 'it' is a test case.
describe("Rule Unit Tests", function() {
// Variable declaration
let originalTimeout;
// All the html unit test cases will be stored in window.__html__ by the preprocessor
let unitTestcaseHTML = window.__html__;
// Loop over all the unitTestcase html/htm files and perform a scan for them
for (let unitTestFile in unitTestcaseHTML) {
// Get the extension of the file we are about to scan
let fileExtension = unitTestFile.substr(unitTestFile.lastIndexOf('.') + 1);
// Make sure the unit test case we are trying to scan is actually an html/htm/svg file; if it is not
// just move on to the next one.
if (fileExtension !== 'html' && fileExtension !== 'htm' && fileExtension !== 'svg') {
continue;
}
// This IIFE is executed for each of the unitTestFiles; we have to use this type of function
// to allow dynamic creation/execution of the unit test cases. It effectively forces a synchronous
// execution, which is needed to make sure that all the tests run in the same order.
// For now we do not need to consider threaded execution because overall these test cases will take at
// most about half a second each (500ms * 780 test cases).
(function(unitTestFile) {
// Description of the test case that will be run.
describe("Load Test: " + unitTestFile, function() {
// Function to run before every testcase (it --> is a testcase)
// This beforeEach function allows adding async support to a test case.
// The testcase will not run until the done function is called
beforeEach(function() {
// Extract the current jasmine DEFAULT_TIMEOUT_INTERVAL value to restore later on
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
// Set the DEFAULT_TIMEOUT_INTERVAL to 3 minutes, to allow the DAP scan to finish.
jasmine.DEFAULT_TIMEOUT_INTERVAL = 180000;
});
// The individual test case for each of the unit test cases.
// Note the done callback that is passed in; it is used to wait for async functions.
it('a11y scan should match expected value', async function() {
// Extract the unitTestcase data file from the unitTestcase hash map.
// This will contain the full content of the testcase file. Includes the document
// object also.
let unitTestDataFileContent = unitTestcaseHTML[unitTestFile];
// Create an iframe element in the body of the current document
let iframe = document.createElement('iframe');
iframe.id = "unitTestcaseIframe";
// Append the iframe to the body
document.body.appendChild(iframe);
// Start to write the contents of the html file into the iframe document
// This will include the entire html page, including the doc type and all.
iframe.contentWindow.document.open();
iframe.contentWindow.document.write(unitTestDataFileContent);
iframe.contentWindow.document.close();
// Get the iframe window
let iframeWin = document.getElementById("unitTestcaseIframe").contentWindow;
// Get the iframe document that was just created
let iframeDoc = iframeWin.document;
let checker = new ace.Checker();
let report = await checker.check(iframeDoc, null);
expect(report.results).toBeDefined();
// Extract the ruleCoverage object from the unit testcases that is loaded on to the iframe.
let expectedInfo = iframeWin.UnitTest;
let legacyExpectedInfo = iframeWin.OpenAjax &&
iframeWin.OpenAjax.a11y &&
iframeWin.OpenAjax.a11y.ruleCoverage;
if (expectedInfo && expectedInfo.ruleIds) | {
let filtReport = [];
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
if (expectedInfo.ruleIds.includes(issue.ruleId)) {
// These are too variable between runs - don't test these
delete issue.snippet;
filtReport.push(issue);
}
}
expect(filtReport).withContext(JSON.stringify(filtReport, null, 2)).toEqual(expectedInfo.results);
} | conditional_block |
|
Checker_ruleunit_test.js | ": "1195",
"HAAC_Combobox_Autocomplete": "1196",
"HAAC_Combobox_Autocomplete_Invalid": "1197",
"HAAC_Combobox_Expanded": "1198",
"HAAC_Combobox_Popup": "1199",
"WCAG21_Style_Viewport": "1200",
"WCAG21_Label_Accessible": "1202",
"WCAG21_Input_Autocomplete": "1203",
"WCAG20_Input_VisibleLabel": "1204"
}
let mapGToRule = {}
for (const key in mapRuleToG) {
mapGToRule[mapRuleToG[key]] = key;
}
// Describe this suite of test cases; describe is a test suite and 'it' is a test case.
describe("Rule Unit Tests", function() {
// Variable declaration
let originalTimeout;
// All the html unit test cases will be stored in window.__html__ by the preprocessor
let unitTestcaseHTML = window.__html__;
// Loop over all the unitTestcase html/htm files and perform a scan for them
for (let unitTestFile in unitTestcaseHTML) {
// Get the extension of the file we are about to scan
let fileExtension = unitTestFile.substr(unitTestFile.lastIndexOf('.') + 1);
// Make sure the unit test case we are trying to scan is actually an html/htm/svg file; if it is not
// just move on to the next one.
if (fileExtension !== 'html' && fileExtension !== 'htm' && fileExtension !== 'svg') {
continue;
}
// This IIFE is executed for each of the unitTestFiles; we have to use this type of function
// to allow dynamic creation/execution of the unit test cases. It effectively forces a synchronous
// execution, which is needed to make sure that all the tests run in the same order.
// For now we do not need to consider threaded execution because overall these test cases will take at
// most about half a second each (500ms * 780 test cases).
(function(unitTestFile) {
// Description of the test case that will be run.
describe("Load Test: " + unitTestFile, function() {
// Function to run before every testcase (it --> is a testcase)
// This beforeEach function allows adding async support to a test case.
// The testcase will not run until the done function is called
beforeEach(function() {
// Extract the current jasmine DEFAULT_TIMEOUT_INTERVAL value to restore later on
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
// Set the DEFAULT_TIMEOUT_INTERVAL to 3 minutes, to allow the DAP scan to finish.
jasmine.DEFAULT_TIMEOUT_INTERVAL = 180000;
});
// The individual test case for each of the unit test cases.
// Note the done callback that is passed in; it is used to wait for async functions.
it('a11y scan should match expected value', async function() {
// Extract the unitTestcase data file from the unitTestcase hash map.
// This will contain the full content of the testcase file. Includes the document
// object also.
let unitTestDataFileContent = unitTestcaseHTML[unitTestFile];
// Create an iframe element in the body of the current document
let iframe = document.createElement('iframe');
iframe.id = "unitTestcaseIframe";
// Append the iframe to the body
document.body.appendChild(iframe);
// Start to write the contents of the html file into the iframe document
// This will include the entire html page, including the doc type and all.
iframe.contentWindow.document.open();
iframe.contentWindow.document.write(unitTestDataFileContent);
iframe.contentWindow.document.close();
// Get the iframe window
let iframeWin = document.getElementById("unitTestcaseIframe").contentWindow;
// Get the iframe document that was just created
let iframeDoc = iframeWin.document;
let checker = new ace.Checker();
let report = await checker.check(iframeDoc, null);
expect(report.results).toBeDefined();
// Extract the ruleCoverage object from the unit testcases that is loaded on to the iframe.
let expectedInfo = iframeWin.UnitTest;
let legacyExpectedInfo = iframeWin.OpenAjax &&
iframeWin.OpenAjax.a11y &&
iframeWin.OpenAjax.a11y.ruleCoverage;
if (expectedInfo && expectedInfo.ruleIds) {
let filtReport = [];
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
if (expectedInfo.ruleIds.includes(issue.ruleId)) {
// These are too variable between runs - don't test these
delete issue.snippet;
filtReport.push(issue);
}
}
expect(filtReport).withContext(JSON.stringify(filtReport, null, 2)).toEqual(expectedInfo.results);
} else if (legacyExpectedInfo) {
let expectedInfo = {}
let actualInfo = {}
for (const item of legacyExpectedInfo) {
if (checker.engine.getRule(mapGToRule[item.ruleId])) {
expectedInfo[item.ruleId] = [];
actualInfo[item.ruleId] = [];
for (let xpath of item.failedXpaths) {
xpath = xpath.replace(/([^\]])\//g, "$1[1]/");
if (!xpath.endsWith("]")) xpath += "[1]";
expectedInfo[item.ruleId].push(xpath);
}
} else {
console.log("WARNING:",item.ruleId,"does not exist in current ruleset");
}
}
for (const issue of report.results) {
delete issue.node;
delete issue.ruleTime;
delete issue.bounds;
const ruleId = mapRuleToG[issue.ruleId];
if (ruleId in expectedInfo && issue.value[1] !== "PASS") {
actualInfo[ruleId].push(issue.path.dom);
}
}
for (const ruleId in expectedInfo) {
expectedInfo[ruleId].sort();
actualInfo[ruleId].sort();
}
expect(actualInfo).withContext("\nExpected:" + JSON.stringify(expectedInfo, null, 2) + "\nActual:" + JSON.stringify(actualInfo, null, 2)).toEqual(expectedInfo);
}
// let violationsData = data.report.fail;
// // In the case that the violationData is not defined then trigger an error right away.
// if (violationsData) {
// // Only try to verify results if there are baseline/expected results to actually verify
// if (expectedInfo) {
// // Declare the actualMap which will store all the actual xpath results
// let actualMap = {};
// // Loop over all the violation Data and extract the gID and the xpath for the gId and
// // add it to the actual Map.
// violationsData.forEach(function (actual) {
// // Create a new array in the case that one does not exists
// actualMap[actual.ruleId] = actualMap[actual.ruleId] || [];
// // Fix up the xPath as we need to replace [1] with space so that it can actually match correctly.
// let fixComp = actual.component.replace(/\[1\]/g, "");
// // Add the fixed xPath to the actual map for the gId
// actualMap[actual.ruleId].push(fixComp);
// });
// // Loop over all the expected info objects and fix up the xPath so that it is ready for comparison
// expectedInfo.forEach(function (expected) {
// // Temp array to store all the fixed xpaths
// let temp = [];
// // Fix all the xPaths that are in the failedXpaths array
// expected.failedXpaths.forEach(function (xpath) {
// temp.push(xpath.replace(/\[1\]/g, ""));
// });
// // Reassign the temp fixed xpath to failedXpath
// expected.failedXpaths = temp;
// });
// // Loop over all the expected xPaths and make sure they are present in the actual results.
// // TODO: Add support for checking passed xPath here also.
// expectedInfo.forEach(function (expected) {
// // In the case the xPath exists in the actualMap then sort them
// if (actualMap[expected.ruleId]) {
// actualMap[expected.ruleId] = actualMap[expected.ruleId].sort();
// }
// // In the case the failedXpaths exists in the expected object then sort them
// if (expected.failedXpaths) {
// expected.failedXpaths = expected.failedXpaths.sort();
// }
// // In the case that the expected failed map is empty and we found violations triggered for this rule then mark this as failed.
// if (expected.failedXpaths.length == 0) { | // expect(typeof (actualMap[expected.ruleId])).toEqual('undefined', "\nShould trigger NO violations, but triggered for rule: " + expected.ruleId + " with " + actualMap[expected.ruleId]); | random_line_split |
|
shows_data.js | . Indoor concerts held in the largest venues are sometimes called arena concerts or amphitheatre concerts. Informal names for a concert include show and gig.",
"description": "Concert",
"photos": [
"https://ichef.bbci.co.uk/news/1024/cpsprodpb/16441/production/_109910219_massiveattack2.jpg",
"http://www.surinenglish.com/noticias/202009/23/media/cortadas/Imagen%[email protected]",
"https://www.diariobahiadecadiz.com/noticias/wp-content/uploads/2018/12/conciertoconcertmusicfestivalchi18-web-750x430.jpg"
]
},
{
"id": 3,
"name": "Cirque du Soleil",
"info": "A circus is a company of performers who put on diverse entertainment shows that may include clowns, acrobats, trained animals, trapeze acts, musicians, dancers, hoopers, tightrope walkers, jugglers, magicians, unicyclists, as well as other object manipulation and stunt-oriented artists. Contemporary circus has been credited with a revival of the circus tradition since the late 1970s, when a number of groups began to experiment with new circus formats and aesthetics, typically avoiding the use of animals to focus exclusively on human artistry. Circuses within the movement have tended to favor a theatrical approach, combining character-driven circus acts with original music in a broad variety of styles to convey complex themes or stories.",
"description": "Cirque du Soleil",
"photos": [
"https://agendainfantil.es/wp-content/uploads/2020/01/circus-roncalli.jpg",
"https://www.lovevalencia.com/wp-content/uploads/2017/12/circo-wonderland-valencia-inframundo.jpg",
"https://cadenaser00.epimg.net/ser/imagenes/2020/06/29/internacional/1593459707_054833_1593460181_noticia_normal.jpg"
]
}
]
exports.shows = [
{
id: 26,
name: "The Lion King",
price: 35.99,
hotelPoints: 500,
photo: "https://www.miaminews24.com/wp-content/uploads/2019/05/cheetah-tab.jpg",
description: "When an unthinkable tragedy, orchestrated by Simba’s wicked uncle, Scar, takes his father’s life, Simba flees the Pride Lands, leaving his loss and the life he knew behind. Eventually companioned by two hilarious and unlikely friends, Simba starts anew. But when weight of responsibility and a desperate plea from the now ravaged Pride Lands come to find the adult prince.",
quantity: 0,
category: "theatre"
},
{
id: 27,
name: "The Phantom of the Opera",
price: 30.50,
hotelPoints: 450,
photo: "https://cdn.londonandpartners.com/asset/the-phantom-of-the-opera-musical-at-her-majestys-theatre_phantom-of-the-opera-image-courtesy-of-cameron-mackintosh_240b6958e824776c4b4b222d72281b95.jpg",
description: "Based on the 1910 horror novel by Gaston Leroux, which has been adapted into countless films, The Phantom of the Opera follows a deformed composer who haunts the grand Paris Opera House. Sheltered from the outside world in an underground cavern, the lonely, romantic man tutors and composes operas for Christine, a gorgeous young soprano star-to-be. ",
quantity: 0,
category: "theatre"
},
{
id: 28,
name: "Aladdin",
price: 42.99,
hotelPoints: 550,
photo: "https://www.broadwaycollection.com/wp-content/uploads/2015/08/aladdin.jpg",
description: "In the middle-eastern town of Agrabah, Princess Jasmine is feeling hemmed in by her father’s desire to find her a royal groom. Meanwhile, the Sultan’s right-hand man, Jafar, is plotting to take over the throne. When Jasmine sneaks out of the palace incognito, she forms an instant connection with Aladdin, a charming street urchin and reformed thief.",
quantity: 0,
category: "theatre"
},
{
id: 29, | name: "Wicked",
price: 32.50,
hotelPoints: 470,
photo: "https://image.nuevayork.es/wp-content/uploads/2014/11/Wicked-on-Broadway-Tickets.png.webp",
description: "Wicked, the Broadway sensation, looks at what happened in the Land of Oz…but from a different angle. Long before Dorothy arrives, there is another girl, born with emerald-green skin—smart, fiery, misunderstood, and possessing an extraordinary talent. When she meets a bubbly blonde who is exceptionally popular, their initial rivalry turns into the unlikeliest of friendships…",
quantity: 0,
category: "theatre"
},
{
id: 30,
name: "Ain't Too Proud",
price: 25.99,
hotelPoints: 400,
photo: "https://www.love4musicals.com/wp-content/uploads/2018/11/1200x6752-1200x675-e1541668270340.jpg",
description: "Ain't Too Proud is the electrifying new musical that follows The Temptations' extraordinary journey from the streets of Detroit to the Rock & Roll Hall of Fame. With their signature dance moves and unmistakable harmonies, they rose to the top of the charts creating an amazing 42 Top Ten Hits with 14 reaching number one.",
quantity: 0,
category: "theatre"
},
{
id: 31,
name: "Queen - Wembley",
price: 89.99,
hotelPoints: 1000,
photo: "https://images-na.ssl-images-amazon.com/images/I/71g40mlbinL._SL1072_.jpg",
description: "Exactly 365 days after their world famous Live Aid gig, Queen returns to Wembley Stadium. Why this concert is better than the Live Aid one? Simple. Because it’s longer. A full-length rock show instead of a 20 minute TV gig. From the first chord of One Vision to the last euphoric singalong of We Are The Champions; Freddie, Bryan, Roger and John show London who the best live band in the world is.",
quantity: 0,
category: "concert"
},
{
id: 32,
name: "Led Zeppelin - Royal Albert",
price: 69.99,
hotelPoints: 800,
photo: "https://www.nacionrock.com/wp-content/uploads/117162885.jpg",
description: "One singer, one guitarist, one bass player and a drummer. Classic. Rock. In 1970 Robert Plant, Jimmy Page, John Paul Jones and John Bonham celebrated Jimmy’s 26th birthday at the Royal Albert Hall. “At the time the largest and most prestigious gig”, according to the birthday boy himself. Only two years after their first gig (as a band called “The New Yardbirds”) they gave rock history one of the most legendary two-and-a-half-hours of all time.",
quantity: 0,
category: "concert"
},
{
id: 33,
name: "David Bowie - Santa Monica",
price: 85.99,
hotelPoints: 950,
photo: "https://img.discogs.com/V7w6P2ut4y_EiD5Pf4InpMZ-_tk=/fit-in/300x300/filters:strip_icc():format(jpeg):mode_rgb():quality(40)/discogs-images/R-9909633-1488376223-6630.png.jpg",
description: "Davy Jones. No, not the singer of The Monkees, but the legendary artist also known as Ziggy Stardust, the Thin White Duke, Aladdin Sane, Jareth the Goblin King, or just Bowie. David Bowie. From his revolutionary breakthrough with Space Oddity to the last painful notes of Black Star, Bowie was a true artist. It’s not possible to choose the best concert of an artist with such a rich history of live performances. ",
quantity: 0,
category: "concert | random_line_split |
|
tool.js | .body.ontouchmove = function(event) {
var el = document.getElementsByClassName('scroll');
if (el.length == 0) {
return;
}
var y = event.originalEvent.changedTouches[0].clientY;
var st = el[0].scrollTop; // scroll offset (scrollTop) of the container
if (y >= window.lastY && st <= 10) { // if the scroll offset is at (or near) zero we are at the top; when the user is also pulling down, block the touchmove event.
window.lastY = y;
event.preventDefault();
}
window.lastY = y;
};
}
/**
 * Prevent the pull-down overscroll bounce in the WeChat browser
*/
util.overscroll = function() {
var el = document.querySelector('.scroll');
el.addEventListener('touchstart', function(evt) {
window.lastY = evt.targetTouches[0].clientY; // record the last Y coordinate when the screen is touched.
var top = el.scrollTop;
var totalScroll = el.scrollHeight;
var currentScroll = top + el.offsetHeight;
if (top === 0) {
el.scrollTop = 1;
} else if (currentScroll === totalScroll) {
el.scrollTop = top - 1;
}
});
el.addEventListener('touchmove', function(evt) {
var y = evt.targetTouches[0].clientY;
if (el.offsetHeight < el.scrollHeight) {
evt._isScroller = true;
}
if (el.scrollTop <= 10 && y > window.lastY) {
evt._isScroller = false;
}
});
// Keep the WeChat browser from exposing the underlying background on pull-down
document.body.ontouchstart = function(evt) {
evt.preventDefault();
if (!evt._isScroller) {
evt.preventDefault();
}
};
}
// 获取jssdk | on() {
// 获取当前时间戳(以s为单位)
var timestamp = Date.parse(new Date()) / 1000;
var jsapi_ticket = window.jsapi_ticket;
var nonceStr = createNonceStr();
function createNonceStr() {
var chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
var str = "";
for (var i = 0; i < 16; i++) {
var randomNum = parseInt(Math.random() * chars.length, 10);
str += chars.substr(randomNum, 1);
}
return str;
}
// If the ticket could not be obtained, end the function early
if (!jsapi_ticket) {
return false;
}
// The parameters here must be sorted by key in ascending ASCII order
var string = "jsapi_ticket=" + jsapi_ticket + "&noncestr=" + nonceStr + "×tamp=" + timestamp + "&url=" + window.location.href.split('#')[0];
var data = new Uint8Array(encodeUTF8(string));
var result = sha1(data);
var signature = Array.prototype.map.call(result, function(e) {
return (e < 16 ? "0" : "") + e.toString(16);
}).join("");
// Assemble the value to return
var json = {}
json.timestamp = timestamp;
json.nonceStr = nonceStr;
json.signature = signature;
return json;
}
// Disable WeChat sharing
util.shareDeny = function() {
// Configuration that disables sharing
wx.config({
debug: false,
appId: 'wx0cad77c43b1d74ce',
timestamp: 123123213,
nonceStr: '123123123',
signature: '123123123',
jsApiList: [
'hideOptionMenu',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'hideOptionMenu',
]
});
wx.hideOptionMenu();
});
window.weixinShare.status = 'deny';
}
// Share the overall evaluation
util.shareEvaluation = function(name) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
// Get the insurance id
var insuranceId = util.getQueryString('id');
wx.showOptionMenu();
// Configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareAppMessage({
title: name + ',人工智能大揭秘',
desc: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 Listen for the "share to Moments" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareTimeline({
title: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'evaluation';
}
// Share the report
util.shareReport = function(name, code) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
wx.showOptionMenu();
// Configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareAppMessage({
title: name + ',这个报告很棒哦,分享给你~',
desc: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
});
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 Listen for the "share to Moments" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareTimeline({
title: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
})
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'report';
}
window.weixinShare = {
status: 'init',
shareDeny: util.shareDeny,
shareEvaluation: util.shareEvaluation,
shareReport | 签名的方法
util.getJssdkSign = functi | conditional_block |
tool.js | .ui.showLoading();
return config;
}, function(error) {
// Do something with the request error
window.ui && window.ui.showLoading();
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Do something with the response error
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'filter') {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
window.ui && window.ui.showLoading('', true);
return config;
}, function(error) {
// Do something with the request error
window.ui && window.ui.showLoading('', true);
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Do something with the response error
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'protect') {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
if (window.isSendingAjax == false) {
window.isSendingAjax = true;
} else {
config.baseURL = 'http://127.0.0.1'
}
return config;
}, function(error) {
// Do something with the request error
window.isSendingAjax = false;
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
window.isSendingAjax = false;
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Do something with the response error
window.isSendingAjax = false;
return Promise.reject(error);
});
}
if (misc == null) {
// Add a request interceptor to axios
ajax.interceptors.request.use(function(config) {
// Do something before the request is sent
return config;
}, function(error) {
// Do something with the request error
return Promise.reject(error);
});
// Add a response interceptor to axios
ajax.interceptors.response.use(function(response) {
// Do something with the response data
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// Do something with the response error
return Promise.reject(error);
});
}
if (window.isSendingAjax === true) {
return false;
}
if (method.toLowerCase() == 'get') { // GET requests are sent directly
return ajax.get(url);
} else if (method.toLowerCase() == 'post') { // POST requests compute the signature first, then send
data = util.appendSignature(data);
return ajax.post(url, data);
} else {
return false;
}
}
// Convert the JSON object into URL query-string form (keys sorted a-z), compute the signature and append it
util.appendSignature = function(obj) {
const seed = "420E496DCF9D9CEC4FD231AC3C258820";
if (util.isEmptyObject(obj)) {
return {
"signature": b64_hmac_sha1(seed, '') + '='
}
}
let string = util.a2z(obj).param;
let string2 = util.a2z(obj).encodedParam;
let signature = b64_hmac_sha1(seed, string) + '=';
string = string2 + '&signature=' + encodeURIComponent(signature);
return string;
// obj2.signature=signature;
// return obj2;
}
// Convert JSON to a URL query string with keys in ascending ASCII order
util.a2z = function(obj) {
let arr = [];
// Collect the keys of obj into the empty array
for (let i in obj) {
arr[arr.length] = i;
}
// Sort the array elements into ascending alphabetical order
arr.sort();
let arr2=arr.slice(0);
// Join each key/value pair with '=' and join the pairs with '&'
for (let i = 0; i < arr.length; i++) {
let key=arr[i];
arr[i] = key + '=' + obj[key];
}
for (let j = 0; j < arr2.length; j++) {
let key=arr2[j];
arr2[j] = key + '=' + encodeURIComponent(obj[key]);
}
let output={
param: arr.join('&'),
encodedParam: arr2.join('&')
}
return output;
}
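// --- Editor-added illustrative sketch (not part of the original file) ---
// Shows how util.a2z and util.appendSignature fit together. b64_hmac_sha1 is assumed to be
// the same global HMAC-SHA1 helper used above; the payload below is made up.
var signExample = { cross_id: 123, action: 'join' };
var signExampleParts = util.a2z(signExample);
// signExampleParts.param        -> 'action=join&cross_id=123' (keys in ascending ASCII order)
// signExampleParts.encodedParam -> same string with each value URI-encoded
var signExampleBody = util.appendSignature(signExample);
// signExampleBody looks like 'action=join&cross_id=123&signature=<URI-encoded base64 HMAC-SHA1>'
// and is exactly what the POST branch of util.ajax sends as the request body.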
Vue.prototype.$util = util;
// Execute the given method after a delay
// this.debounce(fn, delayInMs, arg1, arg2...)
Vue.mixin({
data(){
return {
debounceTimer:null
}
},
methods:{
debounce(func,time,...args){
if(!this.$util.isFunction(func) || !(time>=0)){
return;
}
window.clearTimeout(this.debounceTimer);
this.debounceTimer = window.setTimeout(()=>{
func(...args);
},time)
}
}
})
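// --- Editor-added illustrative sketch (not part of the original file) ---
// A hypothetical component using the debounce mixin above: only the last onInput call
// within 300ms actually triggers doSearch, because the mixin clears the shared timer first.
Vue.component('debounce-demo', {
  template: '<input @input="onInput($event)" />',
  methods: {
    onInput(e) {
      this.debounce(this.doSearch, 300, e.target.value);
    },
    doSearch(keyword) {
      // hypothetical endpoint, only for illustration
      this.$util.ajax('/search?keyword=' + encodeURIComponent(keyword), 'get');
    }
  }
});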
// Custom Vue filter: truncate strings longer than the given length and show an ellipsis at the end
Vue.filter('more', function(input, num = 5) {
if (!util.isString(input)) {
return input;
}
if (input.length > num) {
return input.slice(0, num) + '...';
} else {
return input;
}
})
// Custom Vue filter: truncate strings longer than the given length and show an ellipsis in the middle
Vue.filter('more2', function(input, num = 16) {
if (!util.isString(input) || input.length<=num || num<6) {
return input;
}
if (input.length > num) {
return input.slice(0, num-4) + '...' + input.slice(-4);
} else {
return input;
}
})
// Custom Vue filter: strip '2 full-width spaces' or '4 half-width spaces' from the text
Vue.filter('nospace', function(input) {
if (!util.isString(input)) {
return input;
}
let output=input.replace(/ /g,'').replace(/ /g,'');
return output;
})
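// --- Editor-added illustrative template usage (not part of the original file) ---
// <span>{{ user.nickname | more(8) }}</span>   keeps the first 8 chars and appends '...'
// <span>{{ tx.hash | more2(16) }}</span>       keeps the head plus the last 4 chars with '...' in between
// <p>{{ article.body | nospace }}</p>          strips the full-width / 4-space indent characters
// (user, tx and article are hypothetical bindings, shown only to illustrate the filters.)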
Vue.prototype.$store = {
}
// Append an empty anchor to the browser history stack, to keep the back gesture from closing the page
Vue.prototype.pushHistory = function() {
let state = {
title: 'title',
url: '#'
}
window.history.pushState(state, "title", "#");
util.shareDeny();
}
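// --- Editor-added illustrative sketch (not part of the original file) ---
// Typical use: push the empty anchor when a full-screen panel opens, so the WeChat
// back gesture pops that state instead of closing the page. The hook below is hypothetical.
// mounted() {
//   this.pushHistory();
//   window.addEventListener('popstate', this.closePanel);
// }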
window.ajaxRetryTimes=20;
// Check whether the token has been obtained: run the callback, or retry after a 200ms delay, at most 20 times
Vue.prototype.doAndRetry = function(func) {
let that = this;
let token = window.sessionStorage.getItem('AISessionToken');
if (token) {
func();
window.ajaxRetryTimes = 0;
return;
}
if (window.ajaxRetryTimes > 0) {
window.ajaxRetryTimes--;
setTimeout(function() {
that.doAndRetry(func);
}, 200);
}
}
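// --- Editor-added illustrative sketch (not part of the original file) ---
// Waits for AISessionToken to appear in sessionStorage before firing the request;
// otherwise it retries every 200ms, up to the remaining window.ajaxRetryTimes attempts.
// The endpoint and handler below are hypothetical:
// this.doAndRetry(() => {
//   this.$util.ajax('/report/list', 'get').then(res => { this.reports = res; });
// });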
| identifier_name |
||
tool.js | .body.ontouchmove = function(event) {
var el = document.getElementsByClassName('scroll');
if (el.length == 0) {
return;
}
var y = event.originalEvent.changedTouches[0].clientY;
var st = el[0].scrollTop; // scroll offset (scrollTop) of the container
if (y >= window.lastY && st <= 10) { // if the scroll offset is at (or near) zero we are at the top; when the user is also pulling down, block the touchmove event.
window.lastY = y;
event.preventDefault();
}
window.lastY = y;
};
}
/**
 * Prevent the pull-down overscroll bounce in the WeChat browser
*/
util.overscroll = function() {
var el = document.querySelector('.scroll');
el.addEventListener('touchstart', function(evt) {
window.lastY = evt.targetTouches[0].clientY; // record the last Y coordinate when the screen is touched.
var top = el.scrollTop;
var totalScroll = el.scrollHeight;
var currentScroll = top + el.offsetHeight;
if (top === 0) {
el.scrollTop = 1;
} else if (currentScroll === totalScroll) {
el.scrollTop = top - 1;
}
});
el.addEventListener('touchmove', function(evt) {
var y = evt.targetTouches[0].clientY;
if (el.offsetHeight < el.scrollHeight) {
evt._isScroller = true;
}
if (el.scrollTop <= 10 && y > window.lastY) {
evt._isScroller = false;
}
});
// Keep the WeChat browser from exposing the underlying background on pull-down
document.body.ontouchstart = function(evt) {
evt.preventDefault();
if (!evt._isScroller) {
evt.preventDefault();
}
};
}
// Method for obtaining the WeChat JS-SDK signature
util.getJssdkSign = function() {
// Get the current timestamp (in seconds)
var timestamp = Date.parse(new Date()) / 1000;
var jsapi_ticket = window.jsapi_ticket;
var nonceStr = createNonceStr();
function createNonceStr() {
var chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
var str = "";
for (var i = 0; i < 16; i++) {
var randomNum = parseInt(Math.random() * chars.length, 10);
str += chars.substr(randomNum, 1);
}
return str;
}
// If the ticket could not be obtained, end the function early
if (!jsapi_ticket) {
return false;
}
// The parameters here must be sorted by key in ascending ASCII order
var string = "jsapi_ticket=" + jsapi_ticket + "&noncestr=" + nonceStr + "×tamp=" + timestamp + "&url=" + window.location.href.split('#')[0];
var dat | on.nonceStr = nonceStr;
json.signature = signature;
return json;
}
// Disable WeChat sharing
util.shareDeny = function() {
// Configuration that disables sharing
wx.config({
debug: false,
appId: 'wx0cad77c43b1d74ce',
timestamp: 123123213,
nonceStr: '123123123',
signature: '123123123',
jsApiList: [
'hideOptionMenu',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'hideOptionMenu',
]
});
wx.hideOptionMenu();
});
window.weixinShare.status = 'deny';
}
// Share the overall evaluation
util.shareEvaluation = function(name) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
// Get the insurance id
var insuranceId = util.getQueryString('id');
wx.showOptionMenu();
// Configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareAppMessage({
title: name + ',人工智能大揭秘',
desc: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 Listen for the "share to Moments" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareTimeline({
title: name + ',人工智能大揭秘',
link: window.location.origin + '/expert/?route=detail&id=' + insuranceId,
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'evaluation';
}
// Share the report
util.shareReport = function(name, code) {
var json = util.getJssdkSign();
if (json == false) {
return;
}
wx.showOptionMenu();
// Configure WeChat sharing
wx.config({
debug: false,
appId: appId,
timestamp: json.timestamp,
nonceStr: json.nonceStr,
signature: json.signature,
jsApiList: [
'checkJsApi',
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
wx.ready(function() {
wx.checkJsApi({
jsApiList: [
'onMenuShareAppMessage',
'onMenuShareTimeline',
]
});
// 2.1 Listen for the "share with friends" button click; customize the shared content and handle the share result callbacks
wx.onMenuShareAppMessage({
title: name + ',这个报告很棒哦,分享给你~',
desc: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击发送给朋友');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
});
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
// 2.2 监听“分享到朋友圈”按钮点击、自定义分享内容及分享结果接口
wx.onMenuShareTimeline({
title: name + ',这个报告很棒哦,分享给你~',
link: window.location.origin + '/expert/?route=report&invitationCode=' + encodeURIComponent(code),
imgUrl: window.location.origin + '/expert/logo.jpg',
trigger: function(res) {
//alert('用户点击分享到朋友圈');
},
success: function(res) {
//alert('已分享');
util.ajax('/report/generateLink', 'post', {
invitationCode: encodeURIComponent(code)
}).then(function(res) {
if (res.code == '100000') {
// window.ui.showToast('已分享','',true);
}
})
},
cancel: function(res) {
//alert('已取消');
},
fail: function(res) {
//alert(JSON.stringify(res));
}
});
});
window.weixinShare.status = 'report';
}
window.weixinShare = {
status: 'init',
shareDeny: util.shareDeny,
shareEvaluation: util.shareEvaluation,
shareReport | a = new Uint8Array(encodeUTF8(string));
var result = sha1(data);
var signature = Array.prototype.map.call(result, function(e) {
return (e < 16 ? "0" : "") + e.toString(16);
}).join("");
// return出去
var json = {}
json.timestamp = timestamp;
js | identifier_body |
tool.js | null) {
let AISessionToken = window.sessionStorage.getItem('AISessionToken');
if (AISessionToken == null) {
AISessionToken = '';
// window.ui && window.ui.showQrcode();
// return; // 无token时阻止此次ajax请求
}
let config = {
baseURL: baseURL,
timeout: 20000,
responseType: "json",
crossDomain: true,
headers: {
// 'Content-Type': 'application/x-www-form-urlencoded',
'Content-Type': 'application/x-www-form-urlencoded'
// 'AISessionToken': AISessionToken
}
}
config.url = url;
if (data) {
config.data = data;
}
let ajax = util.axios.create(config);
if (misc == 'loading') {
// 为axios添加请求拦截器
ajax.interceptors.request.use(function(config) {
// 在发送请求之前做些什么
window.ui && window.ui.showLoading();
return config;
}, function(error) {
// 对请求错误做些什么
window.ui && window.ui.showLoading();
return Promise.reject(error);
});
// 为axios添加响应拦截器
ajax.interceptors.response.use(function(response) {
// 对响应数据做点什么
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// 对响应错误做点什么
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'filter') {
// 为axios添加请求拦截器
ajax.interceptors.request.use(function(config) {
// 在发送请求之前做些什么
window.ui && window.ui.showLoading('', true);
return config;
}, function(error) {
// 对请求错误做些什么
window.ui && window.ui.showLoading('', true);
return Promise.reject(error);
});
// 为axios添加响应拦截器
ajax.interceptors.response.use(function(response) {
// 对响应数据做点什么
window.ui && window.ui.hideLoading();
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// 对响应错误做点什么
window.ui && window.ui.hideLoading();
return Promise.reject(error);
});
}
if (misc == 'protect') {
// 为axios添加请求拦截器
ajax.interceptors.request.use(function(config) {
// 在发送请求之前做些什么
if (window.isSendingAjax == false) {
window.isSendingAjax = true;
} else {
config.baseURL = 'http://127.0.0.1'
}
return config;
}, function(error) {
// 对请求错误做些什么
window.isSendingAjax = false;
return Promise.reject(error);
});
// 为axios添加响应拦截器
ajax.interceptors.response.use(function(response) {
// 对响应数据做点什么
window.isSendingAjax = false;
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// 对响应错误做点什么
window.isSendingAjax = false;
return Promise.reject(error);
});
}
if (misc == null) {
// 为axios添加请求拦截器
ajax.interceptors.request.use(function(config) {
// 在发送请求之前做些什么
return config;
}, function(error) {
// 对请求错误做些什么
return Promise.reject(error);
});
// 为axios添加响应拦截器
ajax.interceptors.response.use(function(response) {
// 对响应数据做点什么
if (
response.data.code != '100000'
&& response.data.code != '110000'
&& response.data.code != '200005'
&& response.data.code != '200021'
&& window.location.href.indexOf('/score2')==-1
) {
window.ui && window.ui.showToast(response.data.message, '');
}
return response.data;
}, function(error) {
// 对响应错误做点什么
return Promise.reject(error);
});
}
if (window.isSendingAjax === true) {
return false;
}
if (method.toLowerCase() == 'get') { // 如果是GET请求,则直接发送
return ajax.get(url);
} else if (method.toLowerCase() == 'post') { // 如果是POST请求,先计算signature,再发送
data = util.appendSignature(data);
return ajax.post(url, data);
} else {
return false;
}
}
// 将json对象转换成url格式(键名a-z方式顺序排列),计算签名再放入json中
util.appendSignature = function(obj) {
const seed = "420E496DCF9D9CEC4FD231AC3C258820";
if (util.isEmptyObject(obj)) {
return {
"signature": b64_hmac_sha1(seed, '') + '='
}
}
let string = util.a2z(obj).param;
let string2 = util.a2z(obj).encodedParam;
let signature = b64_hmac_sha1(seed, string) + '=';
string = string2 + '&signature=' + encodeURIComponent(signature);
return string;
// obj2.signature=signature;
// return obj2;
}
// json转为ASCII码升序的url字符串
util.a2z = function(obj) {
let arr = [];
// 将obj中的键名依次存入空数组
for (let i in obj) {
arr[arr.length] = i;
}
// 将数组内的元素按字母顺序正序排列
arr.sort();
let arr2=arr.slice(0);
// 将键值对内部用等号连接,在外部用&连接
for (let i = 0; i < arr.length; i++) {
let key=arr[i];
arr[i] = key + '=' + obj[key];
}
for (let j = 0; j < arr2.length; j++) {
let key=arr2[j];
arr2[j] = key + '=' + encodeURIComponent(obj[key]);
}
let output={
param: arr.join('&'),
encodedParam: arr2.join('&')
}
return output;
}
Vue.prototype.$util = util;
// 延迟执行指定方法
// this.debounce(方法名,延迟毫秒数,参数1,参数2...)
Vue.mixin({
data(){
return {
debounceTimer:null
}
},
methods:{
debounce(func,time,...args){
if(!this.$util.isFunction(func) || !(time>=0)){
return;
}
window.clearTimeout(this.debounceTimer);
this.debounceTimer = window.setTimeout(()=>{
func(...args);
},time)
}
}
})
// Vue自定义过滤器,将长于指定位数的字符串截断,末尾显示省略号
Vue.filter('more', function(input, num = 5) {
if (!util.isString(input)) {
return input;
}
if (input.length > num) {
return input.slice(0, num) + '...';
} else {
return input;
}
})
// Vue自定义过滤器,将长于指定位数的字符串截断,中间显示省略号
Vue.filter('more2', function(input, num = 16) {
if (!util.isString(input) || input.length<=num || num<6) {
return input;
}
if (input.length > num) {
return input.slice(0, num-4) + '...' + input.slice(-4);
} else {
return input;
}
})
// Vue自定义过滤器,将文本中的'2个全角空格'或'4个半角空格'过滤掉
Vue.filter('nospace', function(input) {
if (!util.isString(input)) { | return input; | random_line_split |
|
tf.js | (false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
// 如果选中的record没有在这一页显示,remove就会出问题
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
/**
* 弹出窗口控件
*/
Ext.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
Ext.tf.PopSelect.superclass.initComponent(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.each(form.find("name", refName), function(field) {
field.setValue(v);
});
}
return this;
},
onDestroy : function() {
Ext.destroy(this.win, this.panel);
Ext.tf.PopSelect.superclass.onDestroy.call(this);
},
edit : function() {
var grid = this.panel.grid;
var store = this.panel.store;
var view = grid.getView();
var sm = grid.getSelectionModel();
for ( var i = 0; i < view.getRows().length; i++) {
if (sm.isSelected(i)) {
var record = store.getAt(i);
var id = record.get('id');
var name = record.get('name');
this.setValue(name);
this.setRefName(id);
}
;
}
;
this.win.hide();
},
// pop select window
onTriggerClick : function() {
if (this.win == null) {
this.panel = new Ext.tf.SimplePanel({
title : '',
pageSize : 10,
hasAdd : false,
dblclickToggle : false,
contextmenuToggle : false,
gridConfigEx : {
height : 200
},
queryUrl : this.queryUrl,
// 查询条件Form
queryConfig : this.queryConfig,
// Grid 读取数据时的reader
readerConfig : this.readerConfig,
// Grid的列
gridCm : this.gridCm
});
this.panel.grid.on('rowdblclick', this.edit, this);
this.win = new Ext.Window({
title : this.title,
modal : true,
width : 520,
autoHeight : true,
closeAction : 'hide',
items : [ this.panel ],
buttons : [ {
text : '关闭',
handler : function() {
this.win.hide();
}.createDelegate(this)
}, {
text : '清除',
handler : function() {
this.setValue('');
this.setRefName('');
this.win.hide();
}.createDelegate(this)
}, {
text : '确认',
handler : this.edit.createDelegate(this)
} ]
});
}
this.win.show(this);
}
});
Ext.reg("popselect", Ext.tf.PopSelect);
/**
* SimpleReportPanel
*/
Ext.tf.SimpleReportPanel = Ext.extend(Ext.Panel, {
closable : true,
layout : 'fit',
autoScroll : true,
queryUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : false,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : false,
initComponent : function() {
try {
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize",
root : "data",
id : "id"
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl
}),
reader : this.reader
});
this.grid = new Ext.tf.SimpleGridPanel({
tbar : [ {
text : '刷新',
handler : function() {
this.load();
}.createDelegate(this)
}, {
text : '打印',
handler : function() {
printPage(this.grid);
}.createDelegate(this)
} ],
viewConfig : {
forceFit : ''
},
width : '', | gridConfigEx : this.gridConfigEx,
store : this.store, | random_line_split |
|
tf.js | (grid) {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// 只下载没有隐藏的列(isHidden()为true表示隐藏,其他都为显示)
// 临时数组,存放所有当前显示列的下标
for ( var i = 0; i < colCount; i++) {// 从第三列开始,因为我的第1、2列是分别是rownumber和selectmodel。
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// 显示列的列标题
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>';
var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v + ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
Ext.namespace('Ext.tf.util');
/**
* Compiles a selector/xpath query into a reusable function. The returned
* function takes one parameter "root" (optional), which is the context node
* from where the query should start.
*
* @param {Ext.form.FormPanel}
* formPanel 包含主数据的FormPanel
* @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
* gridPanel 包含细节数据的GridPanel
* @param {Array}
* excludes gridPanel中不需要获取的列, 数组中加入需要摈弃的grid.store.fields中
* @param {Array}
* resultPropNames (可选) 定义返回的json对象的属性名,缺省为["formData", "gridData"]
* @return {Object} 缺省为{formData:masterData, gridData:detailData}
* masterData为json对象, detailData为[json对象],
* detailData数组中的json对象的属性名与grid.store的fields定义相同
*/
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
}
;
gridDataList.push(rec.data)
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
* Usage : var lable = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
* 回车对应函数 handler
*/
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() | printPage | identifier_name |
|
tf.js | },
contextmenu : function(grid, rowIndex, e) {
e.preventDefault();
e.stopEvent();
var updateMenu = new Ext.menu.Item({
iconCls : 'edit',
id : 'updateMenu',
text : '修改',
handler : this.edit.createDelegate(this)
});
var deleteMenu = new Ext.menu.Item({
iconCls : 'delete',
id : 'deleteMenu',
text : '删除',
handler : this.del.createDelegate(this)
});
var selections = this.getSelections();
if (selections.length > 1) {
updateMenu.disable();
}
var menuList = [ updateMenu, deleteMenu ];
this.grid_menu = new Ext.menu.Menu({
id : 'mainMenu',
items : menuList
});
var coords = e.getXY();
grid.getSelectionModel().selectRow(rowIndex);
this.grid_menu.showAt([ coords[0], coords[1] ]);
}
});
/**
* 功能页面的panel类 Config 说明: title : '模块目录管理', pageSize : 10, queryUrl :
* ModuleService.findModuleCategory.createDelegate(this), editUrl :
* ModuleService.editModuleCategory.createDelegate(this), deleteUrl : xxx,
* // Grid 需要的配置信息, 会覆盖掉缺省的 gridConfigEx : {}; // query panel 需要的配置信息, 会覆盖掉缺省的
* queryConfigEx : {};
*
* //查询用到的form配置 queryConfig : [ { fieldLabel : '名称', name : 'name', allowBlank :
* true } ], //编辑用到的form配置 editConfig : [ { fieldLabel : '模块目录名称', name : 'name' }, {
* fieldLabel : '排列顺序', name : 'ordinal' } ], //reader的配置 readerConfig : [ {
* name : 'id', mapping : 'id' }, { name : 'name', mapping : 'name' }, { name :
* 'ordinal', mapping : 'ordinal' } ], //网格记录显示的配置 gridCm : [ { "hidden" : true,
* "header" : "ID", "sortable" : true, "dataIndex" : "id" }, { "header" :
* "模块目录名称", "sortable" : true, "dataIndex" : "name" }, { "header" : "排列顺序",
* "sortable" : true, "dataIndex" : "ordinal" } ]
*/
Ext.tf.SimplePanel = Ext.extend(Ext.Panel, {
closable : true,
hasAdd : true,
queryUrl : Ext.emptyFn,
editUrl : Ext.emptyFn,
deleteUrl : Ext.emptyFn,
// toggle: in grid, whether double click fire edit
dblclickToggle : true,
// toggle: in grid, whether right mouse click fire context menu
contextmenuToggle : true,
initComponent : function() {
try {
Ext.applyIf(this, {
pageSize : 10
});
this.reader = new Ext.data.JsonReader({
totalProperty : "totalSize", // 总记录数
root : "data", // 分页对象中的数据集
id : "id" //
}, Ext.data.Record.create(this.readerConfig));
this.store = new Ext.data.Store({
proxy : new Ext.ux.data.DWRProxy({
dwrFunction : this.queryUrl,
listeners : {
'beforeload' : function(dataProxy, params) {
// alert(dwr.util.toDescriptiveString(params, 2));
// alert(this.queryUrl +"eeeee");
var o = this.queryForm.getForm().getValues(false);
console.log(o);
if(o.isDetail != undefined){
if(o.isDetail == '1')
o.isDetail = true;
else if(o.isDetail == '0')
o.isDetail = false;
}
if (!params.limit)
params.limit = this.pageSize;
params[dataProxy.loadArgsKey] = [ o, params ];
}.createDelegate(this),
'load' : function ( obj, records, options ) {
console.log("load=======================================================")
console.log(obj);
console.log(records);
console.log(options);
console.log("load=======================================================")
}
}
}),
reader : this.reader
});
this.pagingBar = new App.PagingToolbar({
pageSize : this.pageSize,
store : this.store,
displayInfo : true,
displayMsg : '{0} - {1} of {2}',
emptyMsg : "没有记录"
});
this.grid = new Ext.tf.SimpleGridPanel({
gridConfigEx : this.gridConfigEx,
edit : this.edit.createDelegate(this),
del : this.del.createDelegate(this),
store : this.store,
cm : new Ext.grid.ColumnModel(this.gridCm),
dblclickToggle : this.dblclickToggle,
contextmenuToggle : this.contextmenuToggle,
bbar : this.pagingBar
});
this.editForm = new Ext.tf.SimpleFormPanel({
items : this.editConfig,
close : function() {
this.editWin.hide();
}.createDelegate(this),
saveFn : function() {
var formBean = this.editForm.getForm().getValues(false);
console.log(formBean)
if(formBean.isDetail){
if(formBean.isDetail == '一级目录'){
formBean.isDetail = false;
}else if(formBean.isDetail == '二级目录'){
formBean.isDetail = true;
}
}
//console.log(formBean);
this.editUrl(formBean, function() {
Ext.MessageBox.alert("提示", "保存成功!");
this.editWin.hide();
this.store.reload();
}.createDelegate(this));
}.createDelegate(this)
});
this.editWin = new Ext.Window({
title : '',
// closeAction : 'hide',
modal : true,
autoHeight : true,
close : function() {
this.hide();
},
// autoWidth : true,
width : 300,
items : [ this.editForm ]
});
this.addRecord = function() {
this.editForm.getForm().reset();
this.editWin.show();
};
this.query = function() {
this.grid.selModel.clearSelections();
this.store.reload();
};
this.queryForm = new Ext.tf.SimpleQueryFormPanel(
{
queryConfigEx : this.queryConfigEx,
items : this.queryConfig,
buttons : [ {
text : '查询',
formBind : true,
scope : this,
handler : this.query.createDelegate(this)
} ],
keys : [ Ext.tf.util.enterKey(this.query
.createDelegate(this)) ]
});
if (this.hasAdd) {
this.queryForm.addButton('新增', this.addRecord, this);
}
this.items = [ this.queryForm, this.grid ];
Ext.tf.SimplePanel.superclass.initComponent.call(this);
} catch (e) {
//console.log(e);
throw e;
}
},
// private
edit : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
} else if (selections.length != 1) {
Ext.MessageBox.alert("提示", "不能选择多行编辑!");
return;
}
this.editWin.show();
this.editForm.getForm().loadRecord(selections[0]);
},
del : function() {
var selections = this.grid.getSelections();
if (selections.length == 0) {
Ext.MessageBox.alert("提示", "请选择一条的记录!");
return;
}
var fn = function(e) {
if (e == "yes") {
var ids = new Array();
for ( var i = 0, len = selections.length; i < len; i++) {
try {
// 如果选中的record没有在这一页显示,remove就会出问题
selections[i].get("id");
ids[i] = selections[i].get("id");
} catch (e) {
// //console.log(e);
}
}
this.deleteUrl(ids.join(","), function() {
Ext.MessageBox.alert("提示", "删除完毕!");
this.store.reload();
}.createDelegate(this));
}
}
Ext.MessageBox.confirm("提示", "确认要删除所选择的记录么?", fn, this);
},
// public
load : function() {
return this.store.load({
params : {
start : 0,
limit : this.pageSize
}
});
}
});
/**
* 弹出窗口控件
*/
Ex | t.tf.PopSelect = Ext.extend(Ext.form.TriggerField, {
triggerClass : 'x-form-date-trigger',
readOnly : true,
initComponent : function() {
Ext.tf.PopSelect.superclass.initComponent(this);
},
/**
* Find ref element, set value
*/
setRefName : function(v) {
var refName = this.refName || ''; // If not refName, then ??
var form = this.findParentBy(function(v) {
if (Ext.type(v.getForm) == 'function')
return true;
});
if (form != null) {
Ext.ea | conditional_block |
|
tf.js | var store = grid.getStore();
var recordCount = store.getCount();
tableStr = tableStr + '<tbody>'
for ( var i = 0; i < recordCount; i++) {
var r = store.getAt(i);
tableStr = tableStr + '<tr>';
for ( var j = 0; j < temp_obj.length; j++) {
var dataIndex = cm.getDataIndex(temp_obj[j]);
var tdValue = r.get(dataIndex);
var rendererFunc = cm.getRenderer(temp_obj[j]);
if (rendererFunc != null) {
tdValue = rendererFunc(tdValue);
}
if (tdValue == null || tdValue == 0) {
tdValue = ' ';
}
if (j != 0)
tableStr = tableStr + '<td style="text-align:center;">'
+ tdValue + '</td>';
else
tableStr = tableStr + '<td>' + tdValue + '</td>';
}
tableStr = tableStr + '</tr>';
}
tableStr = tableStr + '</tbody></table>';
var head = '<link rel="stylesheet" type="text/css" href="../css/printReport.css" />';
var titleHTML = tableStr;// document.getElementById("printGridfff").innerHTML;
var newwin = window.open('about:blank', '', '');
newwin.document.write(head);
newwin.document.write(titleHTML);
newwin.document.location.reload();
newwin.print();
// newwin.close();
}
tfMoney = function(v, sign) {
if (!sign)
sign = '';
v = (Math.round((v - 0) * 100)) / 100;
v = (v == Math.floor(v)) ? v
+ ".00" : ((v * 10 == Math.floor(v * 10)) ? v
+ "0" : v);
v = String(v);
var ps = v.split('.');
var whole = ps[0];
var sub = ps[1] ? '.' + ps[1] : '.00';
var r = /(\d+)(\d{3})/;
while (r.test(whole)) {
whole = whole.replace(r, '$1' + ',' + '$2');
}
v = whole + sub;
if (v.charAt(0) == '-') {
return '-' + sign + v.substr(1);
}
return sign + v;
}
Ext.apply(Ext.form.VTypes, {
daterange : function(val, field) {
var date = field.parseDate(val);
if (!date) {
return;
}
if (field.startDateField
&& (!this.dateRangeMax || (date.getTime() != this.dateRangeMax
.getTime()))) {
var start = Ext.getCmp(field.startDateField);
start.setMaxValue(date);
start.validate();
this.dateRangeMax = date;
} else if (field.endDateField
&& (!this.dateRangeMin || (date.getTime() != this.dateRangeMin
.getTime()))) {
var end = Ext.getCmp(field.endDateField);
end.setMinValue(date);
end.validate();
this.dateRangeMin = date;
}
/*
* Always return true since we're only using this vtype to set the
* min/max allowed values (these are tested for after the vtype test)
*/
return true;
},
password : function(val, field) {
if (field.initialPassField) {
var pwd = Ext.getCmp(field.initialPassField);
return (val == pwd.getValue());
}
return true;
},
passwordText : 'Passwords do not match'
});
Ext.grid.CheckColumn = function(config) {
Ext.apply(this, config);
if (!this.id) {
this.id = Ext.id();
}
this.renderer = this.renderer.createDelegate(this);
};
Ext.grid.CheckColumn.prototype = {
init : function(grid) {
this.grid = grid;
this.grid.on('render', function() {
var view = this.grid.getView();
view.mainBody.on('mousedown', this.onMouseDown, this);
}, this);
},
onMouseDown : function(e, t) {
if (t.className && t.className.indexOf('x-grid3-cc-' + this.id) != -1) {
e.stopEvent();
var index = this.grid.getView().findRowIndex(t);
var record = this.grid.store.getAt(index);
record.set(this.dataIndex, !record.data[this.dataIndex]);
}
},
renderer : function(v, p, record) {
p.css += ' x-grid3-check-col-td';
return '<div class="x-grid3-check-col' + (v ? '-on' : '')
+ ' x-grid3-cc-' + this.id + '"> </div>';
}
};
Ext.namespace('Ext.tf.util');
/**
* Compiles a selector/xpath query into a reusable function. The returned
* function takes one parameter "root" (optional), which is the context node
* from where the query should start.
*
* @param {Ext.form.FormPanel}
* formPanel 包含主数据的FormPanel
* @param {Ext.grid.GridPanel/Ext.grid.EditorGridPanel}
* gridPanel 包含细节数据的GridPanel
* @param {Array}
* excludes gridPanel中不需要获取的列, 数组中加入需要摈弃的grid.store.fields中
* @param {Array}
* resultPropNames (可选) 定义返回的json对象的属性名,缺省为["formData", "gridData"]
* @return {Object} 缺省为{formData:masterData, gridData:detailData}
* masterData为json对象, detailData为[json对象],
* detailData数组中的json对象的属性名与grid.store的fields定义相同
*/
Ext.tf.util.gatherData = function(formPanel, gridPanel, excludes,
resultPropNames) {
var store = gridPanel.store;
var gridDataList = [];
var formData = formPanel.getForm().getValues(false);
store.each(function(rec) {
for ( var i = excludes.length - 1; i >= 0; --i) {
delete rec.data[excludes[i]];
}
;
gridDataList.push(rec.data)
});
if (resultPropNames) {
var result = {};
result[resultPropNames[0]] = formData;
result[resultPropNames[1]] = gridDataList;
return result;
} else
return {
formData : formData,
gridData : gridDataList
};
}
Ext.tf.util.debug = function(msg) {
if (typeof (console) != "undefined") {
console.debug(msg);
}
}
/*
* Usage : var lable = Ext.tf.util.OptCache.getLabel("Nationality", "1");
*
*/
Ext.tf.util.OptCache = {};
Ext.tf.util.OptCache.data = {};
Ext.tf.util.OptCache.getOptions = function(optName) {
var util = Ext.tf.util;
if (!util.OptCache.data[optName]) {
OptionProvider.getOptions(optName, {
async : false,
callback : function(list) {
var opt = {};
for ( var i = list.length - 1; i >= 0; --i) {
opt[list[i].id] = list[i].name;
}
;
util.OptCache.data[optName] = opt;
}
});
} else {
util.debug("util.OptCache.getOptions: using cache");
}
return util.OptCache.data[optName];
};
Ext.tf.util.OptCache.getLabel = function(optName, key) {
var util = Ext.tf.util;
var options = util.OptCache.getOptions(optName);
if (options) {
return options[key];
} else {
return '';
}
};
/**
* 回车对应函数 handler
*/
Ext.tf.util.enterKey = function(handler) {
return {
key : [ 10, 13 ],
fn : handler
}
};
// //////////////////////
Ext.ns("Ext.tf");
Ext.tf.currentUser = null;
Ext.tf.SimpleFormPanel = Ext.extend(Ext.FormPanel, {
autoHeight : true,
frame : true,
defaultType : 'textfield',
initComponent : function() {
Ext.applyIf(this, {
// saveFn : function() {}
});
this.build();
Ext.tf.SimpleFormPanel.superclass.initComponent.call(this);
},
build : function() {
this | {
var tableStr = '<table cellpadding="0" cellspacing="0" width="100%" id="statisticByDay">';
var cm = grid.getColumnModel();
var colCount = cm.getColumnCount();
var temp_obj = new Array();
// 只下载没有隐藏的列(isHidden()为true表示隐藏,其他都为显示)
// 临时数组,存放所有当前显示列的下标
for ( var i = 0; i < colCount; i++) {// 从第三列开始,因为我的第1、2列是分别是rownumber和selectmodel。
if (cm.isHidden(i) == true) {
} else {
temp_obj.push(i);
}
}
tableStr = tableStr + '<thead><tr>';
for ( var i = 0; i < temp_obj.length; i++) {
// 显示列的列标题
tableStr = tableStr + '<td>' + cm.getColumnHeader(temp_obj[i])
+ '</td>';
}
tableStr = tableStr + '</tr></thead>'; | identifier_body |
|
index.go | }
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just an wrapper around getKey, except case when index has handle, when it can be dynamically changed
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any inex than
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just an list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub)
primaryKey, err := tr.GetKey(sel).Get()
if err != nil {
return nil, err
}
primaryTuple, err := sub.Unpack(primaryKey)
//primary, err := UnpackKeyIndex(indexKey, primaryKey)
if err != nil || len(primaryTuple) < 1 {
return nil, ErrNotFound
}
return i.object.primary.Sub(primaryTuple...), nil
}
// ReindexUnsafe will update index info (NOT consistency safe function)
// this function will use data provited by th object so should be used with care
func (i *Index) ReindexUnsafe(data interface{}) *PromiseErr {
input := structAny(data)
p := i.object.promiseErr()
p.do(func() Chain {
primaryTuple := input.getPrimary(i.object)
err := i.Write(p.tr, primaryTuple, input, nil)
if err != nil {
return p.fail(err)
}
return p.done(nil)
})
return p
}
func (i *Index) doClearAll(tr fdb.Transaction) | {
start, end := i.dir.FDBRangeKeys()
tr.ClearRange(fdb.KeyRange{Begin: start, End: end})
} | identifier_body |
|
index.go | if i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// embedded tuple cause problems with partitial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
} | hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just an wrapper around getKey, except case when index has handle, when it can be dynamically changed
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any inex than
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just an list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub | hash := geohash.Encode(lat, long)
if i.Geo < 12 { | random_line_split |
index.go | i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// embedded tuple cause problems with partitial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
}
hash := geohash.Encode(lat, long)
if i.Geo < 12 {
hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) | () bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just an wrapper around getKey, except case when index has handle, when it can be dynamically changed
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) { // no need to delete any inex than
return nil
}
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just an list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan(sub | needValueStore | identifier_name |
index.go | i.handle != nil {
keyTuple := i.handle(input.value.Interface())
// Would not index object if key is empty
if keyTuple == nil || len(keyTuple) == 0 {
return nil
}
tmpTuple := tuple.Tuple{}
for _, element := range keyTuple {
tmpTuple = append(tmpTuple, element)
}
// embedded tuple cause problems with partitial fetching
key = tmpTuple
} else {
key = tuple.Tuple{}
if i.Geo != 0 {
latInterface := input.Get(i.fields[0])
lngInterface := input.Get(i.fields[1])
lat, long := latInterface.(float64), lngInterface.(float64)
if lat == 0.0 && long == 0.0 {
return nil
}
hash := geohash.Encode(lat, long)
if i.Geo < 12 {
hash = hash[0:i.Geo] // Cutting hash to needed precision
}
key = append(key, hash)
} else {
//key = tuple.Tuple{indexValue}
for _, field := range i.fields {
indexValue := input.Get(field)
key = append(key, field.tupleElement(indexValue))
}
}
}
return
}
func (i *Index) needValueStore() bool {
if i.handle != nil {
return true
}
return false
}
// getOldKey is just an wrapper around getKey, except case when index has handle, when it can be dynamically changed
func (i *Index) getOldKey(tr fdb.Transaction, primaryTuple tuple.Tuple, oldObject *Struct) (tuple.Tuple, error) {
if i.needValueStore() { // the index is custom
bites, err := tr.Get(i.valueDir.Pack(primaryTuple)).Get()
if err != nil {
return nil, err
}
oldKey, err := tuple.Unpack(bites)
if err != nil {
return nil, err
}
return oldKey, nil
}
return i.getKey(oldObject), nil
}
// writeSearch will set new index keys and delete old ones for text search index
func (i *Index) writeSearch(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
newWords := searchGetInputWords(i, input)
toAddWords := map[string]bool{}
skip := false
if i.checkHandler != nil {
if !i.checkHandler(input.value.Interface()) {
//fmt.Println("skipping index")
skip = true
}
// old value is better to delete any way
}
if !skip {
for _, word := range newWords {
toAddWords[word] = true
}
fmt.Println("index words >>", newWords)
}
toDeleteWords := map[string]bool{}
if oldObject != nil {
oldWords := searchGetInputWords(i, oldObject)
for _, word := range oldWords {
_, ok := toAddWords[word]
if ok {
delete(toAddWords, word)
} else {
toDeleteWords[word] = true
}
}
}
for word := range toAddWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
fmt.Println("write search key", fullKey, "packed", i.dir.Pack(fullKey))
tr.Set(i.dir.Pack(fullKey), []byte{})
}
for word := range toDeleteWords {
key := tuple.Tuple{word}
fullKey := append(key, primaryTuple...)
tr.Clear(i.dir.Pack(fullKey))
}
return nil
}
// Write writes index related keys
func (i *Index) Write(tr fdb.Transaction, primaryTuple tuple.Tuple, input, oldObject *Struct) error {
if i.search {
return i.writeSearch(tr, primaryTuple, input, oldObject)
}
key := i.getKey(input)
if oldObject != nil {
toDelete, err := i.getOldKey(tr, primaryTuple, oldObject)
if err != nil { // if error fetching old index - throw it right here
return err
}
if toDelete != nil {
if reflect.DeepEqual(toDelete, key) {
return nil
}
i.Delete(tr, primaryTuple, toDelete)
}
}
if i.optional && i.isEmpty(input) |
// nil means should not index this object
if key == nil {
return nil
}
if i.Unique {
previousPromise := tr.Get(i.dir.Pack(key))
tr.Set(i.dir.Pack(key), primaryTuple.Pack()) // will be cancelled in case of error
previousBytes, err := previousPromise.Get()
if err != nil {
return err
}
if len(previousBytes) != 0 {
if !bytes.Equal(primaryTuple.Pack(), previousBytes) {
return ErrAlreadyExist
}
}
} else {
fullKey := append(key, primaryTuple...)
tr.Set(i.dir.Pack(fullKey), []byte{})
}
if i.needValueStore() {
tr.Set(i.valueDir.Pack(primaryTuple), key.Pack())
}
return nil
}
// Delete removes selected index
func (i *Index) Delete(tr fdb.Transaction, primaryTuple tuple.Tuple, key tuple.Tuple) {
if key == nil {
fmt.Println("index key is NIL strange behavior")
// no need to clean, this field wasn't indexed
return
}
sub := i.dir.Sub(key...)
if i.Unique {
fmt.Println("+++ delete the index", sub)
tr.Clear(sub)
} else {
// Add primary here
sub = sub.Sub(primaryTuple...)
tr.Clear(sub) // removing old keys
}
}
func (i *Index) getIterator(tr fdb.ReadTransaction, q *Query) (subspace.Subspace, *fdb.RangeIterator) {
if i.Unique {
i.object.panic("index is unique (lists not supported)")
}
//if len(q.primary) != 0 {
sub := i.dir.Sub(q.primary...)
start, end := sub.FDBRangeKeys()
if q.from != nil {
//start = sub.Sub(q.from...)
if q.reverse {
end = sub.Pack(q.from)
} else {
start = sub.Pack(q.from)
}
if q.to != nil {
if q.reverse {
start = sub.Pack(q.to)
} else {
end = sub.Pack(q.to)
}
}
}
r := fdb.KeyRange{Begin: start, End: end}
rangeResult := tr.GetRange(r, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll, Limit: q.limit, Reverse: q.reverse})
iterator := rangeResult.Iterator()
return sub, iterator
}
// getList will fetch and request all the objects using the index
func (i *Index) getList(tr fdb.ReadTransaction, q *Query) ([]*needObject, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*needObject{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
values = append(values, i.object.need(tr, i.object.sub(key)))
}
return values, nil
}
// getPrimariesList will fetch just an list of primaries
func (i *Index) getPrimariesList(tr fdb.ReadTransaction, q *Query) (*Slice, error) {
sub, iterator := i.getIterator(tr, q)
primaryLen := len(i.object.primaryFields)
values := []*Value{}
for iterator.Advance() {
kv, err := iterator.Get()
if err != nil {
return nil, err
}
fullTuple, err := sub.Unpack(kv.Key)
if err != nil {
return nil, err
}
if len(fullTuple)-primaryLen < 0 {
return nil, errors.New("invalid data: key too short")
}
key := fullTuple[len(fullTuple)-primaryLen:]
value := Value{object: i.object}
value.fromKeyTuple(key)
values = append(values, &value)
}
return &Slice{values: values}, nil
}
func (i *Index) getPrimary(tr fdb.ReadTransaction, indexKey tuple.Tuple) (subspace.Subspace, error) {
sub := i.dir.Sub(indexKey...)
if i.Unique {
bytes, err := tr.Get(sub).Get()
if err != nil {
return nil, err
}
if len(bytes) == 0 {
return nil, ErrNotFound
}
primaryTuple, err := tuple.Unpack(bytes)
if err != nil {
return nil, err
}
return i.object.primary.Sub(primaryTuple...), nil
}
sel := fdb.FirstGreaterThan | { // no need to delete any inex than
return nil
} | conditional_block |
main.go | ); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool) | forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listners of same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
| for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder") | random_line_split |
main.go | key.TotalPipeline, totalPipeline, "Waking up for graceful shutdown")
if totalPipeline > 0 {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "Items are still draining")
startingTimeGood = now
continue
}
if now.Sub(startingTimeGood) >= *p.config.SilentGracefulTimeDuration {
p.logger.Log(logkey.TotalPipeline, totalPipeline, "I've been silent. Graceful shutdown done")
return
}
}
}
}
func (p *gateway) Pipeline() int64 {
var totalForwarded int64
for _, f := range p.forwarders {
totalForwarded += f.Pipeline()
}
return totalForwarded
}
func (p *gateway) Close() error {
errs := make([]error, 0, len(p.forwarders)+1)
for _, f := range p.forwarders {
errs = append(errs, f.Close())
}
if p.etcdMgr != nil && p.etcdMgr.server != nil {
errs = append(errs, p.etcdMgr.removeMember())
errs = append(errs, p.etcdMgr.shutdown(true)) // shutdown the etcd server and close the client
}
if p.debugServer != nil {
errs = append(errs, p.debugServerListener.Close())
}
if p.internalMetricsServer != nil {
errs = append(errs, p.internalMetricsListener.Close())
}
return errors.NewMultiErr(errs)
}
func (p *gateway) main(ctx context.Context) error {
// Disable the default logger to make sure nobody else uses it
err := p.run(ctx)
return errors.NewMultiErr([]error{err, p.Close()})
}
func (p *gateway) setup(loadedConfig *config.GatewayConfig) {
if loadedConfig.DebugFlag != nil && *loadedConfig.DebugFlag != "" {
p.debugContext.SetFlagStr(*loadedConfig.DebugFlag)
}
p.config = loadedConfig
p.logger = log.NewContext(p.getLogger(loadedConfig)).With(logkey.Time, log.DefaultTimestamp, logkey.Caller, log.DefaultCaller)
p.debugSink.Logger = p.logger
log.DefaultLogger.Set(p.logger)
pidFilename := *loadedConfig.PidFilename
if err := writePidFile(pidFilename); err != nil {
p.logger.Log(log.Err, err, logkey.Filename, pidFilename, "cannot store pid in pid file")
}
defer func() {
log.IfErr(p.logger, os.Remove(pidFilename))
}()
defer func() {
log.DefaultLogger.Set(log.Discard)
}()
}
func (p *gateway) createCommonHTTPChain(loadedConfig *config.GatewayConfig) web.NextConstructor {
h := web.HeadersInRequest{
Headers: map[string]string{
"X-Gateway-Name": *loadedConfig.ServerName,
},
}
cf := &web.CtxWithFlag{
CtxFlagger: &p.ctxDims,
HeaderName: "X-Response-Id",
}
return web.NextConstructor(func(ctx context.Context, rw http.ResponseWriter, r *http.Request, next web.ContextHandler) {
cf.ServeHTTPC(ctx, rw, r, h.CreateMiddleware(next))
})
}
func (p *gateway) setupScheduler(loadedConfig *config.GatewayConfig) *sfxclient.Scheduler {
scheduler := sfxclient.NewScheduler()
scheduler.AddCallback(sfxclient.GoMetricsSource)
scheduler.DefaultDimensions(datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"source": "gateway",
"host": *loadedConfig.ServerName,
"cluster": *loadedConfig.ClusterName,
}))
return scheduler
}
func (p *gateway) scheduleStatCollection(ctx context.Context, scheduler *sfxclient.Scheduler, loadedConfig *config.GatewayConfig, multiplexer signalfx.Sink) (context.Context, context.CancelFunc) {
// We still want to schedule stat collection so people can debug the server if they want
scheduler.Sink = dpsink.Discard
scheduler.ReportingDelayNs = (time.Second * 30).Nanoseconds()
finishedContext, cancelFunc := context.WithCancel(ctx)
if loadedConfig.StatsDelayDuration != nil && *loadedConfig.StatsDelayDuration != 0 {
scheduler.Sink = multiplexer
scheduler.ReportingDelayNs = loadedConfig.StatsDelayDuration.Nanoseconds()
} else {
p.logger.Log("skipping stat keeping")
}
return finishedContext, cancelFunc
}
func (p *gateway) setupForwardersAndListeners(ctx context.Context, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) (signalfx.Sink, error) {
var err error
p.forwarders, err = setupForwarders(ctx, p.tk, loader, loadedConfig, logger, scheduler, &p.debugSink, &p.ctxDims, p.etcdMgr)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup forwarders")
return nil, errors.Annotate(err, "unable to setup forwarders")
}
dpSinks, eSinks, tSinks := splitSinks(p.forwarders)
dmux := &demultiplexer.Demultiplexer{
DatapointSinks: dpSinks,
EventSinks: eSinks,
TraceSinks: tSinks,
Logger: log.NewOnePerSecond(logger),
LateDuration: loadedConfig.LateThresholdDuration,
FutureDuration: loadedConfig.FutureThresholdDuration,
}
scheduler.AddCallback(dmux)
p.versionMetric.RepoURL = "https://github.com/signalfx/gateway"
p.versionMetric.FileName = "/buildInfo.json"
scheduler.AddCallback(&p.versionMetric)
multiplexer := signalfx.FromChain(dmux, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(&p.debugSink)))
p.listeners, err = setupListeners(p.tk, *loadedConfig.ServerName, loadedConfig, loader, loadedConfig.ListenFrom, multiplexer, logger, scheduler)
if err != nil {
p.logger.Log(log.Err, err, "Unable to setup listeners")
return nil, errors.Annotate(err, "cannot setup listeners from configuration")
}
var errs []error
for _, f := range p.forwarders {
err = f.StartupFinished()
errs = append(errs, err)
log.IfErr(logger, err)
}
return multiplexer, FirstNonNil(errs...)
}
func (p *gateway) run(ctx context.Context) error {
p.debugSink.CtxFlagCheck = &p.debugContext
p.logger.Log(logkey.ConfigFile, p.flags.configFileName, "Looking for config file")
p.logger.Log(logkey.Env, strings.Join(os.Environ(), "-"), "Looking for config file")
loadedConfig, err := config.Load(p.flags.configFileName, p.logger)
if err != nil {
p.logger.Log(log.Err, err, "Unable to load config")
return err
}
p.setup(loadedConfig)
p.versionMetric.Logger = p.logger
logger := p.logger
scheduler := p.setupScheduler(loadedConfig)
if err := p.setupDebugServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "debug server failed", err)
return err
}
if err := p.setupInternalMetricsServer(loadedConfig, logger, scheduler); err != nil {
p.logger.Log(log.Err, "internal metrics server failed", err)
return err
}
p.etcdMgr.setup(loadedConfig)
if err := p.etcdMgr.start(); err != nil {
p.logger.Log(log.Err, "unable to start etcd server", err)
return err
}
var bb []byte
if bb, err = json.Marshal(loadedConfig); err == nil {
logger.Log(logkey.Config, string(bb), logkey.Env, strings.Join(os.Environ(), "-"), "config loaded")
}
setupGoMaxProcs(loadedConfig.NumProcs, p.gomaxprocs)
chain := p.createCommonHTTPChain(loadedConfig)
loader := config.NewLoader(ctx, logger, Version, &p.debugContext, &p.debugSink, &p.ctxDims, chain)
multiplexer, err := p.setupForwardersAndListeners(ctx, loader, loadedConfig, logger, scheduler)
if err == nil {
finishedContext, cancelFunc := p.scheduleStatCollection(ctx, scheduler, loadedConfig, multiplexer)
// Schedule datapoint collection to a Discard sink so we can get the stats in Expvar()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
err := scheduler.Schedule(finishedContext)
logger.Log(log.Err, err, logkey.Struct, "scheduler", "Schedule finished")
wg.Done()
}()
if p.setupDoneSignal != nil {
close(p.setupDoneSignal)
}
logger.Log("Setup done. Blocking!")
select {
case <-ctx.Done():
case <-p.signalChan:
err = p.gracefulShutdown()
}
cancelFunc()
wg.Wait()
}
return err
}
var flagParse = flag.Parse
func main() {
flagParse()
signal.Notify(mainInstance.signalChan, syscall.SIGTERM)
log.IfErr(log.DefaultLogger, mainInstance.main(context.Background()))
}
// FirstNonNil returns what it says it does
func FirstNonNil(errs ...error) error | {
for _, err := range errs {
if err != nil {
return err
}
}
return nil
} | identifier_body |
|
main.go |
case "join":
mgr.logger.Log(fmt.Sprintf("joining cluster with etcd server name: %s", mgr.ServerConfig.Name))
if mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true); err == nil {
mgr.logger.Log(fmt.Sprintf("joining etcd cluster @ %s", mgr.client.Endpoints()))
if err = mgr.server.Join(mgr.client); err == nil {
mgr.logger.Log(fmt.Sprintf("successfully joined cluster at %s", mgr.targetCluster))
}
}
default:
err = fmt.Errorf("unsupported cluster-op specified \"%s\"", mgr.operation)
}
return err
}
func (mgr *etcdManager) getMemberID(ctx context.Context) (uint64, error) {
var memberID uint64
// use the client to retrieve this instance's member id
members, err := mgr.client.MemberList(ctx)
if members != nil {
for _, m := range members.Members {
if m.Name == mgr.Name {
memberID = m.ID
}
}
}
return memberID, err
}
func (mgr *etcdManager) removeMember() error {
var err error
var memberID uint64
ctx, cancel := context.WithTimeout(context.Background(), mgr.removeTimeout)
defer cancel()
// only remove yourself from the cluster if the server is running
if mgr.server.IsRunning() {
if memberID, err = mgr.getMemberID(ctx); err == nil {
removed := make(chan error, 1)
go func() {
defer close(removed)
removed <- mgr.client.RemoveMember(mgr.Name, memberID)
}()
select {
case err = <-removed:
cancel()
case <-ctx.Done():
}
if ctx.Err() != nil {
err = ctx.Err()
}
}
}
return err
}
func (mgr *etcdManager) shutdown(graceful bool) (err error) {
if mgr.server.IsRunning() {
// stop the etcd server
mgr.server.Stop(graceful, false) // graceful shutdown true, snapshot false
}
if mgr.client != nil {
// close the client if applicable
err = mgr.client.Close()
}
return err
}
type gateway struct {
flags gatewayFlags
listeners []protocol.Listener
forwarders []protocol.Forwarder
logger log.Logger
setupDoneSignal chan struct{}
tk timekeeper.TimeKeeper
debugServer *httpdebug.Server
debugServerListener net.Listener
internalMetricsServer *internal.Collector
internalMetricsListener net.Listener
stdout io.Writer
gomaxprocs func(int) int
debugContext web.HeaderCtxFlag
debugSink dpsink.ItemFlagger
ctxDims log.CtxDimensions
signalChan chan os.Signal
config *config.GatewayConfig
etcdMgr *etcdManager
versionMetric reportsha.SHA1Reporter
}
var mainInstance = gateway{
tk: timekeeper.RealTime{},
logger: log.DefaultLogger.CreateChild(),
stdout: os.Stdout,
gomaxprocs: runtime.GOMAXPROCS,
debugContext: web.HeaderCtxFlag{
HeaderName: "X-Debug-Id",
},
debugSink: dpsink.ItemFlagger{
EventMetaName: "dbg_events",
MetricDimensionName: "sf_metric",
},
signalChan: make(chan os.Signal, 1),
etcdMgr: &etcdManager{ServerConfig: etcd.ServerConfig{}, logger: log.DefaultLogger.CreateChild()},
}
func init() {
flag.StringVar(&mainInstance.flags.configFileName, "configfile", "sf/gateway.conf", "Name of the db gateway configuration file")
flag.StringVar(&mainInstance.etcdMgr.operation, "cluster-op", "", "operation to perform if running in cluster mode [\"seed\", \"join\", \"\"] this overrides the ClusterOperation set in the config file")
}
func (p *gateway) getLogOutput(loadedConfig *config.GatewayConfig) io.Writer {
logDir := *loadedConfig.LogDir
if logDir == "-" {
p.logger.Log("Sending logging to stdout")
return p.stdout
}
logMaxSize := *loadedConfig.LogMaxSize
logMaxBackups := *loadedConfig.LogMaxBackups
lumberjackLogger := &lumberjack.Logger{
Filename: path.Join(logDir, "gateway.log"),
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool)
for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder")
forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listen | {
if !isStringInSlice(mgr.AdvertisedClientAddress(), mgr.targetCluster) {
mgr.targetCluster = append(mgr.targetCluster, mgr.AdvertisedClientAddress())
}
mgr.client, err = etcd.NewClient(mgr.targetCluster, etcd.SecurityConfig{}, true)
} | conditional_block |
|
main.go | ups: logMaxBackups,
}
p.logger.Log(logkey.Filename, lumberjackLogger.Filename, logkey.Dir, os.TempDir(), "Logging redirect setup")
return lumberjackLogger
}
func (p *gateway) getLogger(loadedConfig *config.GatewayConfig) log.Logger {
out := p.getLogOutput(loadedConfig)
useJSON := *loadedConfig.LogFormat == "json"
if useJSON {
return log.NewJSONLogger(out, log.DefaultErrorHandler)
}
return log.NewLogfmtLogger(out, log.DefaultErrorHandler)
}
func forwarderName(f *config.ForwardTo) string {
if f.Name != nil {
return *f.Name
}
return f.Type
}
var errDupeForwarder = errors.New("cannot duplicate forwarder names or types without names")
func setupForwarders(ctx context.Context, tk timekeeper.TimeKeeper, loader *config.Loader, loadedConfig *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler, Checker *dpsink.ItemFlagger, cdim *log.CtxDimensions, manager *etcdManager) ([]protocol.Forwarder, error) {
allForwarders := make([]protocol.Forwarder, 0, len(loadedConfig.ForwardTo))
nameMap := make(map[string]bool)
for idx, forwardConfig := range loadedConfig.ForwardTo {
logCtx := log.NewContext(logger).With(logkey.Protocol, forwardConfig.Type, logkey.Direction, "forwarder")
forwardConfig.Server = manager.server
forwardConfig.Client = manager.client
forwardConfig.ClusterName = loadedConfig.ClusterName
forwardConfig.AdditionalDimensions = datapoint.AddMaps(loadedConfig.AdditionalDimensions, forwardConfig.AdditionalDimensions)
forwarder, err := loader.Forwarder(forwardConfig)
if err != nil {
return nil, err
}
name := forwarderName(forwardConfig)
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two forwarders with name '%s' or two unnamed forwarders of same type", name))
return nil, errDupeForwarder
}
nameMap[name] = true
logCtx = logCtx.With(logkey.Name, name)
// Buffering -> counting -> (forwarder)
limitedLogger := &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
}
dcount := &dpsink.Counter{
Logger: limitedLogger,
}
count := signalfx.UnifyNextSinkWrap(dcount)
endingSink := signalfx.FromChain(forwarder, signalfx.NextWrap(count))
bconf := &dpbuffered.Config{
Checker: Checker,
BufferSize: forwardConfig.BufferSize,
MaxTotalDatapoints: forwardConfig.BufferSize,
MaxTotalEvents: forwardConfig.BufferSize,
MaxTotalSpans: forwardConfig.BufferSize,
MaxDrainSize: forwardConfig.MaxDrainSize,
NumDrainingThreads: forwardConfig.DrainingThreads,
Name: forwardConfig.Name,
Cdim: cdim,
}
bf := dpbuffered.NewBufferedForwarder(ctx, bconf, endingSink, forwarder.Close, forwarder.StartupFinished, limitedLogger)
allForwarders = append(allForwarders, bf)
groupName := fmt.Sprintf("%s_f_%d", name, idx)
scheduler.AddGroupedCallback(groupName, forwarder)
scheduler.AddGroupedCallback(groupName, bf)
scheduler.AddGroupedCallback(groupName, dcount)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "forwarder",
"source": "gateway",
"host": *loadedConfig.ServerName,
"type": forwardConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return allForwarders, nil
}
var errDupeListener = errors.New("cannot duplicate listener names or types without names")
func setupListeners(tk timekeeper.TimeKeeper, hostname string, loadedConfig *config.GatewayConfig, loader *config.Loader, listenFrom []*config.ListenFrom, multiplexer signalfx.Sink, logger log.Logger, scheduler *sfxclient.Scheduler) ([]protocol.Listener, error) {
listeners := make([]protocol.Listener, 0, len(listenFrom))
nameMap := make(map[string]bool)
for idx, listenConfig := range listenFrom {
logCtx := log.NewContext(logger).With(logkey.Protocol, listenConfig.Type, logkey.Direction, "listener")
name := func() string {
if listenConfig.Name != nil {
return *listenConfig.Name
}
return listenConfig.Type
}()
if nameMap[name] {
logger.Log(fmt.Sprintf("Cannot add two listeners with name '%s' or two unnamed listners of same type", name))
return nil, errDupeListener
}
nameMap[name] = true
count := &dpsink.Counter{
Logger: &log.RateLimitedLogger{
EventCounter: eventcounter.New(tk.Now(), time.Second),
Limit: 16,
Logger: logCtx,
Now: tk.Now,
},
}
endingSink := signalfx.FromChain(multiplexer, signalfx.NextWrap(signalfx.UnifyNextSinkWrap(count)))
listener, err := loader.Listener(endingSink, listenConfig)
if err != nil {
logCtx.Log(log.Err, err, "unable to load config")
return nil, err
}
listeners = append(listeners, listener)
groupName := fmt.Sprintf("%s_l_%d", name, idx)
scheduler.AddGroupedCallback(groupName, listener)
scheduler.AddGroupedCallback(groupName, count)
scheduler.GroupedDefaultDimensions(groupName, datapoint.AddMaps(loadedConfig.AdditionalDimensions, map[string]string{
"name": name,
"direction": "listener",
"source": "gateway",
"host": hostname,
"type": listenConfig.Type,
"cluster": *loadedConfig.ClusterName,
}))
}
return listeners, nil
}
func splitSinks(forwarders []protocol.Forwarder) ([]dpsink.DSink, []dpsink.ESink, []trace.Sink) {
dsinks := make([]dpsink.DSink, 0, len(forwarders))
esinks := make([]dpsink.ESink, 0, len(forwarders))
tsinks := make([]trace.Sink, 0, len(forwarders))
for _, f := range forwarders {
dsinks = append(dsinks, f)
esinks = append(esinks, f)
tsinks = append(tsinks, f)
}
return dsinks, esinks, tsinks
}
func (p *gateway) setupInternalMetricsServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.InternalMetricsListenerAddress == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.InternalMetricsListenerAddress)
if err != nil {
return errors.Annotate(err, "cannot setup internal metrics server")
}
p.internalMetricsListener = listener
collector := internal.NewCollector(logger, scheduler)
handler := mux.NewRouter()
handler.Path("/internal-metrics").HandlerFunc(collector.MetricsHandler)
p.internalMetricsServer = collector
go func() {
err := http.Serve(listener, handler)
logger.Log(log.Err, err, "Finished serving internal metrics server")
}()
return nil
}
func (p *gateway) setupDebugServer(conf *config.GatewayConfig, logger log.Logger, scheduler *sfxclient.Scheduler) error {
if conf.LocalDebugServer == nil {
return nil
}
listener, err := net.Listen("tcp", *conf.LocalDebugServer)
if err != nil {
return errors.Annotate(err, "cannot setup debug server")
}
p.debugServerListener = listener
p.debugServer = httpdebug.New(&httpdebug.Config{
Logger: log.NewContext(logger).With(logkey.Protocol, "debugserver"),
ExplorableObj: p,
})
p.debugServer.Mux.Handle("/debug/dims", &p.debugSink)
p.debugServer.Exp2.Exported["config"] = conf.Var()
p.debugServer.Exp2.Exported["datapoints"] = scheduler.Var()
p.debugServer.Exp2.Exported["goruntime"] = expvar.Func(func() interface{} {
return runtime.Version()
})
p.debugServer.Exp2.Exported["debugdims"] = p.debugSink.Var()
p.debugServer.Exp2.Exported["gateway_version"] = expvar.Func(func() interface{} {
return Version
})
p.debugServer.Exp2.Exported["build_date"] = expvar.Func(func() interface{} {
return BuildDate
})
p.debugServer.Exp2.Exported["source"] = expvar.Func(func() interface{} {
return fmt.Sprintf("https://github.com/signalfx/gateway/tree/%s", Version)
})
go func() {
err := p.debugServer.Serve(listener)
logger.Log(log.Err, err, "Finished serving debug server")
}()
return nil
}
func | setupGoMaxProcs | identifier_name |
|
ntd_utils.py | _feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
Goal: to replace the indicies with setIDs associated with the datasets in the
"test" section of the mixed dataset from the "train" to the "test" features
testIndex: the index from the
yIndicies: a python dictionary; {"setID": list of indicies associated with the set; the indicies are the location of a sample from the set in the original testing set X_test}
-> an element in the list gives index of the next "setID" in the current testing data
->
l_feat_te: a list of hog features.
-> axis=0 is datasets
-> axis=1 is hog features for a specific dataset
-> lengths across axis=1 varies
y_te: a list of setIDs from the "testing" section of the mixed dataset
l_idx_te: locations of the sample in the original roidb
-> axis=0 is datasets
-> axis=1 is the sample location
idx: what use the "idx" from across the y_te?
**error case**: if the # of training examples loaded in y_test > available # of testing
-> shouldn't happend since the test/train split comes originally from a training set (at least) x2 the testing size
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
dsIndicies[setID] += 1 # incriment
yIndicies[setID].remove(testIndex) # "incriment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
| X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model | identifier_body |
|
ntd_utils.py | ,X_test,y_test,X_idx):
"""
Goal: to replace the indicies with setIDs associated with the datasets in the
"test" section of the mixed dataset from the "train" to the "test" features
testIndex: the index from the
yIndicies: a python dictionary; {"setID": list of indicies associated with the set; the indicies are the location of a sample from the set in the original testing set X_test}
-> an element in the list gives index of the next "setID" in the current testing data
->
l_feat_te: a list of hog features.
-> axis=0 is datasets
-> axis=1 is hog features for a specific dataset
-> lengths across axis=1 varies
y_te: a list of setIDs from the "testing" section of the mixed dataset
l_idx_te: locations of the sample in the original roidb
-> axis=0 is datasets
-> axis=1 is the sample location
idx: what use the "idx" from across the y_te?
**error case**: if the # of training examples loaded in y_test > available # of testing
-> shouldn't happend since the test/train split comes originally from a training set (at least) x2 the testing size
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
dsIndicies[setID] += 1 # incriment
yIndicies[setID].remove(testIndex) # "incriment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def genConfCropped(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = None
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Cropped",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfRaw(modelParams,roidbTr,roidbTe,ntdGameInfo):
loaderSettings = {}
loaderSettings['pyloader'] = roidbSampleImageHOG
loaderSettings['calcHog'] = False
loaderSettings['roidbSizes'] = np.arange(len(roidbTr)) + 1
loaderSettings['dsHasTest'] = ntdGameInfo['dsHasTest'] # todo: kind of gross here
return genConf(modelParams,"Raw",roidbTr,roidbTe,loaderSettings,ntdGameInfo)
def genConfSVM(modelParams,dataType,roidbTr,roidbTe,loaderSettings,ntdGameInfo):
X_train, X_test, y_train, y_test, X_idx = roidbToSVMData(roidbTr,roidbTe,
ntdGameInfo['trainSize'],
ntdGameInfo['testSize'],
loaderSettings)
model = loadSvmModel(modelParams,dataType,ntdGameInfo['setID'],ntdGameInfo['repeat'],
ntdGameInfo['size'],X_train,y_train)
print(X_test.shape)
print(y_test.shape)
print("accuracy on test data {}".format(model.score(X_test,y_test)))
print(make_confusion_matrix(model, X_train, y_train, cfg.clsToSet))
print("-"*50)
return make_confusion_matrix(model, X_test, y_test, cfg.clsToSet),model
def | genConfDl | identifier_name |
|
ntd_utils.py |
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
Goal: to replace the indicies with setIDs associated with the datasets in the
"test" section of the mixed dataset from the "train" to the "test" features
testIndex: the index from the
yIndicies: a python dictionary; {"setID": list of indicies associated with the set; the indicies are the location of a sample from the set in the original testing set X_test}
-> an element in the list gives index of the next "setID" in the current testing data
->
l_feat_te: a list of hog features.
-> axis=0 is datasets
-> axis=1 is hog features for a specific dataset
-> lengths across axis=1 varies
y_te: a list of setIDs from the "testing" section of the mixed dataset
l_idx_te: locations of the sample in the original roidb
-> axis=0 is datasets
-> axis=1 is the sample location
idx: what use the "idx" from across the y_te?
**error case**: if the # of training examples loaded in y_test > available # of testing
-> shouldn't happend since the test/train split comes originally from a training set (at least) x2 the testing size
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys():
yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are
print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
dsIndicies[setID] += 1 # incriment
yIndicies[setID].remove(testIndex) # "incriment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
def load | if phase == 'train':
scheduler.step()
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for data in dataloaders[phase]:
# get the inputs
inputs, labels = data
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels) | conditional_block |
|
ntd_utils.py | preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.data[0] * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def roidbToFeatures(roidb,pyloader=roidbSampleHOG,calcHog=False,roidbSizes=None):
pyroidb = RoidbDataset(roidb,[0,1,2,3,4,5,6,7],
loader=pyloader,
transform=None)
if roidbSizes is not None:
pyroidb.roidbSizes = np.arange(len(roidb)) + 1
l_feat,l_idx,y = extract_pyroidb_features(pyroidb, 'hog', cfg.clsToSet, calc_feat = calcHog, \
spatial_size=(32, 32),hist_bins=32, \
orient=9, pix_per_cell=8, cell_per_block=2, \
hog_channel=0)
return l_feat,l_idx,y
def mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,X_idx):
"""
Goal: to replace the indicies with setIDs associated with the datasets in the
"test" section of the mixed dataset from the "train" to the "test" features
testIndex: the index from the
yIndicies: a python dictionary; {"setID": list of indicies associated with the set; the indicies are the location of a sample from the set in the original testing set X_test}
-> an element in the list gives index of the next "setID" in the current testing data
->
l_feat_te: a list of hog features.
-> axis=0 is datasets
-> axis=1 is hog features for a specific dataset
-> lengths across axis=1 varies
y_te: a list of setIDs from the "testing" section of the mixed dataset
l_idx_te: locations of the sample in the original roidb
-> axis=0 is datasets
-> axis=1 is the sample location
idx: what use the "idx" from across the y_te?
**error case**: if the # of training examples loaded in y_test > available # of testing
-> shouldn't happend since the test/train split comes originally from a training set (at least) x2 the testing size
"""
print(len(y_te))
print(len(l_idx_te))
print(len(l_feat_te))
for i in range(8):
print(len(l_idx_te[i]))
print(len(l_feat_te[i]))
# replace the X_test for each match of y_test
yIndicies = {}
dsIndicies = [ 0 for _ in range(len(l_idx_te)) ]
for setID in y_te:
if setID not in yIndicies.keys(): | print("{}: {}".format(setID,len(yIndicies[setID])))
if len(yIndicies[setID]) == 0: continue
dsIdx = dsIndicies[setID] # index for l_feat_te
testIndex = yIndicies[setID][0] # index for x_test
X_test[testIndex] = l_feat_te[setID][dsIdx] # replace sample content
X_idx[testIndex] = {"idx":int(l_idx_te[setID][dsIdx]),"split":"test"} # replace the lookup
dsIndicies[setID] += 1 # incriment
yIndicies[setID].remove(testIndex) # "incriment" index by removing element
print(dsIndicies)
def roidbToSVMData(roidbTr,roidbTe,train_size,test_size,loaderSettings):
ds_feat_tr,l_idx_tr,y_tr = roidbToFeatures(roidbTr,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings['roidbSizes'])
"""
X_train, X_test, y_train, y_test, X_idx = split_data(train_size, test_size, \
l_feat_tr,l_idx_tr, y_tr,\
loaderSettings['dsHasTest'])
"""
ds_feat_te,l_idx_te,y_te = roidbToFeatures(roidbTe,pyloader=loaderSettings['pyloader'],
calcHog=loaderSettings['calcHog'],
roidbSizes=loaderSettings["roidbSizes"])
X_train, X_test, y_train, y_test, testing_idx = split_tr_te_data(ds_feat_tr,l_idx_tr,y_tr,
ds_feat_te,l_idx_te,y_te,
train_size, test_size,
loaderSettings['dsHasTest'])
print("-=-=- training dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_tr):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_train==idx)))
print("-=-=- testing dataset counts -=-=-")
for idx,feat in enumerate(ds_feat_te):
print("{}: {}, {}".format(cfg.DATASET_NAMES_ORDERED[idx],len(feat),np.sum(y_test==idx)))
# this is a work-around for the loading of a "testing" mixed dataset... overwrites the original split from the training data
#mangleTestingData(l_feat_te,l_idx_te,y_te,X_test,y_test,testing_idx)
X_train, X_test = scale_data(X_train, X_test)
print(X_train.shape)
print(y_train.shape)
if X_train.shape[0] != y_train.shape[0]:
raise ValueError("number of examples for x and y are different")
return X_train, X_test, y_train, y_test, testing_idx
def prepareMixedDataset(setID,repeat,size,addHOG=True):
mixedData = load_mixture_set(setID,repeat,size)
roidbTrDict,annoCountTr,roidbTrDict1k = mixedData["train"][0],mixedData["train"][1],mixedData["train"][2]
roidbTeDict,annoCountTe,roidbTeDict1k = mixedData["test"][0],mixedData["test"][1],mixedData['test'][2]
printRoidbDictImageNamesToTextFile(roidbTrDict,"train_{}".format(setID))
printRoidbDictImageNamesToTextFile(roidbTeDict,"test_{}".format(setID))
# does the dataset have a "testing" split?
dsHasTest = [ (i is not None) and (j is not None) for i,j in zip(annoCountTr[size],
annoCountTe[size]) ]
# cropped hog image input
if addHOG:
appendHOGtoRoidbDict(roidbTrDict,size)
appendHOGtoRoidbDict(roidbTeDict,size)
appendHOGtoRoidbDict(roidbTrDict1k,1000)
appendHOGtoRoidbDict(roidbTeDict1k,1000)
print("annoCountTr: {}".format(annoCountTr[size]))
print("annoCountTe: {}".format(annoCountTe[size]))
# print_report(roidbTr,annoCountTr,roidbTe,annoCountTe,setID,repeat,size)
annoSizes = {}
annoSizes['train'] = annoCountTr
annoSizes['test'] = annoCountTe
print("-="*50)
return roidbTrDict,roidbTeDict,roidbTrDict1k,roidbTeDict1k,dsHasTest,annoSizes
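# Load a pickled SVM from modelParams['modelFn'] when provided; otherwise train a new SVM
# and pickle it for later reuse.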
def loadSvmModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
modelFn = modelParams['modelFn']
if modelFn is not None:
model = pickle.load(open(modelFn,"rb"))
else:
model = train_SVM(X_train,y_train)
fn = iconicImagesFileFormat().format("model{}_svm_{}_{}_{}.pkl".format(dataType,setID,repeat,size))
pickle.dump(model,open(fn,"wb"))
print(" saved model to {}".format(fn))
print("\n\n-=- model loaded -=-\n\n")
return model
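# Placeholder: loading a deep-learning model is not implemented yet.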
def loadDlModel(modelParams,dataType,setID,repeat,size,X_train,y_train):
pass
def | yIndicies[setID] = list(np.where(y_test == setID)[0]) # find where the setID's are | random_line_split |
spacetime.rs | pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if a causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
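// A retraction group only counts as a schemaReplicant retraction when it retracts both
// :edb/cardinality and :edb/valueType; everything else falls through to the filtered list.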
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update a `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> {
fn | (attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
Mink | attribute_builder_to_modify | identifier_name |
spacetime.rs | pub fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update a `AttributeMap` in place from the given `[e a typed_value]` triples.
/// | fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
Minkowski | /// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> { | random_line_split |
spacetime.rs | fn attributes_did_change(&self) -> bool {
!(self.attributes_installed.is_empty() &&
self.attributes_altered.is_empty())
}
}
/// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which
/// together contain enough information to reason about a "schemaReplicant retraction".
///
/// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted:
/// - :edb/causetid, :edb/valueType, :edb/cardinality.
///
/// Note that this is currently incomplete/flawed:
/// - we're allowing optional attributes to not be retracted and dangle afterwards
///
/// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes.
fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> {
// Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute
// if all of the schemaReplicant-defining schemaReplicant attributes are being retracted.
// A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality.
let mut filtered_retractions = vec![];
let mut suspect_retractions = vec![];
// Filter out sets of schemaReplicant altering retractions.
let mut eas = BTreeMap::new();
for (e, a, v) in retractions.into_iter() {
if causetids::is_a_schemaReplicant_attribute(a) {
eas.entry(e).or_insert(vec![]).push(a);
suspect_retractions.push((e, a, v));
} else {
filtered_retractions.push((e, a, v));
}
}
// TODO (see https://github.com/whtcorpsinc/edb/issues/796).
// Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce
// retraction of all of the associated schemaReplicant attributes.
// Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently
// rich enough: it lacks distinction between presence and absence, and instead assumes default values.
// Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'.
// Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds.
// for retracted_e in causetId_retractions.keys() {
// if !eas.contains_key(retracted_e) {
// bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted.")));
// }
// }
for (e, a, v) in suspect_retractions.into_iter() {
let attributes = eas.get(&e).unwrap();
// Found a set of retractions which negate a schemaReplicant.
if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update a `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> | builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
Minkowski | {
fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => { | identifier_body |
spacetime.rs | .contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) {
// Ensure that corresponding :edb/causetid is also being retracted at the same time.
if causetId_retractions.contains_key(&e) {
// Remove attributes corresponding to retracted attribute.
attribute_map.remove(&e);
} else {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted.")));
}
} else {
filtered_retractions.push((e, a, v));
}
}
Ok(filtered_retractions)
}
/// Update a `AttributeMap` in place from the given `[e a typed_value]` triples.
///
/// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not
/// contain install and alter markers.
///
/// Returns a report summarizing the mutations that were applied.
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> {
fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder {
existing.get(&attribute_id)
.map(AttributeBuilder::to_modify_attribute)
.unwrap_or_else(AttributeBuilder::default)
}
// Group mutations by impacted solitonId.
let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new();
// For retractions, we start with an attribute builder that's pre-populated with the existing
// attribute values. That allows us to check existing values and unset them.
for (solitonId, attr, ref value) in retractions {
let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map));
match attr {
// You can only retract :edb/unique, :edb/isComponent; all others must be altered instead
// of retracted, or are not allowed to change.
causetids::DB_IS_COMPONENT => {
match value {
&MinkowskiType::Boolean(v) if builder.component == Some(v) => {
builder.component(false);
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v)));
},
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(u) => {
match u {
causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => {
builder.non_unique();
},
causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => {
builder.non_unique();
},
v => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v)));
},
}
},
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value)))
}
},
causetids::DB_VALUE_TYPE |
causetids::DB_CARDINALITY |
causetids::DB_INDEX |
causetids::DB_FULLTEXT |
causetids::DB_NO_HISTORY => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId)));
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
}
for (solitonId, attr, ref value) in assertions.into_iter() {
// For assertions, we can start with an empty attribute builder.
let builder = builders.entry(solitonId).or_insert_with(Default::default);
// TODO: improve error messages throughout.
match attr {
causetids::DB_VALUE_TYPE => {
match *value {
MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); },
MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); },
MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); },
MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); },
MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); },
MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); },
MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); },
MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr)))
}
},
causetids::DB_CARDINALITY => {
match *value {
MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); },
MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value)))
}
},
causetids::DB_UNIQUE => {
match *value {
MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); },
MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value)))
}
},
causetids::DB_INDEX => {
match *value {
MinkowskiType::Boolean(x) => { builder.index(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value)))
}
},
causetids::DB_FULLTEXT => {
match *value {
MinkowskiType::Boolean(x) => { builder.fulltext(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value)))
}
},
causetids::DB_IS_COMPONENT => {
match *value {
MinkowskiType::Boolean(x) => { builder.component(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value)))
}
},
causetids::DB_NO_HISTORY => {
match *value {
MinkowskiType::Boolean(x) => { builder.no_history(x); },
_ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value)))
}
},
_ => {
bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId)))
}
}
};
let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default();
let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default();
for (solitonId, builder) in builders.into_iter() {
match attribute_map.entry(solitonId) {
Entry::Vacant(entry) => {
// Validate once…
builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?;
// … and twice, now we have the Attribute.
let a = builder.build();
a.validate(|| solitonId.to_string())?;
entry.insert(a);
attributes_installed.insert(solitonId);
},
Entry::Occupied(mut entry) => {
| builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?;
let mutations = builder.mutate(entry.get_mut());
attributes_altered.insert(solitonId, mutations);
},
| conditional_block |
|
make.py | size, --device=paper, 2 pages per sheet
* screen: normal page size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_p | options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s reveal --html_slide_theme=%(theme)s'
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
cmd = 'pdflatex %(shell_escape)s %(name)s'
system(cmd)
system(cmd)
system('cp %(name)s.pdf %(name)s-%(postfi).pdf' % vars())
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_rst %(name)s' % vars()
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
| ygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
| conditional_block |
make.py | (cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s reveal --html_slide_theme=%(theme)s'
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
cmd = 'pdflatex %(shell_escape)s %(name)s'
system(cmd)
system(cmd)
system('cp %(name)s.pdf %(name)s-%(postfi).pdf' % vars())
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_rst %(name)s' % vars()
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile sphinx
cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()
system(cmd)
def doconce2format(name, format, options=''):
"""Make given format from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format %(format)s %(name)s %(options)s ' % vars()
system(cmd)
def plain(name, options=''):
doconce2format(name, 'plain', options)
def pandoc(name, options=''):
doconce2format(name, 'pandoc', options)
def ipynb(name, options=''):
doconce2format(name, 'ipynb', options)
def cwiki(name, options=''):
doconce2format(name, 'cwiki', options)
def mwiki(name, options=''):
doconce2format(name, 'mwiki', options)
def gwiki(name, options=''):
doconce2format(name, 'gwiki', options)
def cleanup_html(fn):
show = False
out = []
with codecs.open(fn, "r", encoding='utf-8') as f:
for line in f:
if "<!-- ------------------- end of main content --------------- -->" in line:
show = False
if show:
out.append(line)
if "<!-- ------------------- main content ---------------------- -->" in line:
show = True
assert out, "No output in %s" % fn
with codecs.open(fn, 'w', encoding='utf-8') as f:
f.write("".join(out))
def mksnippets():
for fn in glob.glob("._*.html"):
with codecs.open(fn, 'r', encoding='utf-8') as thebook:
snippet_name = None
snippet_content = []
snippets = {}
for line in thebook:
if 'navigation buttons at the bottom of the page' in line \
or 'end of snippets' in line:
break
if 'snippet: ' in line:
m = re.search(ur'snippet:\s*(\w+)', line)
if m:
snippet_name = m.groups(1)
snippets[snippet_name] = snippet_content
else:
if snippet_name:
if re.match('<h\d', line):
snippet_content = []
snippet_name = None
else:
snippet_content.append(line)
for snippet_name, snippet_content in snippets.items():
with codecs.open("snippets/%s.html" % snippet_name,
'w', encoding='utf-8') as snippet:
snippet.write("".join(snippet_content))
def main():
"""
Produce various formats from the d | ocon | identifier_name |
|
make.py |
def spellcheck():
for filename in glob.glob('*.do.txt'):
if not filename.startswith('tmp'):
cmd = 'doconce spellcheck -d .dict4spell.txt %(filename)s' % vars()
system(cmd)
def latex(name,
latex_program='pdflatex', # or 'latex'
options='--latex_code_style=vrb',
version='paper', # or 'screen', '2up', 'A4', 'A4-2up'
postfix='', # or 'auto'
ptex2tex=None, # only for ptex2tex step
):
"""
Make latex/pdflatex (according to latex_program) PDF file from
the doconce file name (without any .do.txt extension).
version can take the following values:
* paper: normal page size, --device=paper
* 2up: normal page size, --device=paper, 2 pages per sheet
* A4: A4 page size, --device=paper
* A4-2up: A4 page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename:
cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename
system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s reveal --html_slide_theme=%(theme)s'
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if | """Run system command cmd using subprocess module."""
print cmd
try:
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print """Command
%s
failed""" % cmd
print 'Return code:', e.returncode
print e.output
print("Hello")
sys.exit(1)
print output
f = open(logfile, 'a'); f.write(output); f.close()
unix_command_recorder.append(cmd) # record command for bash script
return output | identifier_body |
|
make.py | page size, --device=paper, 2 pages per sheet
* screen: normal pages size, --device=screen
If a separate ptex2tex step is wanted, fill in all necessary
commands in the ptex2tex string.
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
if version in ('paper', 'A4', '2up', 'A4-2up'):
if not '--device=paper' in options:
options += ' --device=paper'
elif version == 'screen' and '--device=paper' in options:
options = options.replace('--device=paper', '')
if version in ('A4', 'A4-2up'):
if not '--latex_papersize=a4' in options:
options += ' --latex_papersize=a4'
if postfix == 'auto':
if version == 'paper':
postfix = '4print'
elif version == 'screen':
postfix = '4screen'
else:
postfix = version
# Compile source
cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()
system(cmd)
cmd = r"doconce replace '%% insert custom LaTeX commands...' '\usepackage[russian]{babel} \usepackage{titlesec} \titleformat{\subsubsection}[runin] {\normalfont\normalsize\bfseries}{\thesubsubsection.}{1em}{} \let\paragraph=\subsubsection' %(name)s.tex" % vars()
system(cmd)
cmd = r"doconce replace '\usepackage{lmodern}' '%%\usepackage{lmodern}' %(name)s.tex" % vars()
system(cmd)
# Transform .p.tex to .tex?
if ptex2tex is not None:
cmd = ptex2tex
system(cmd)
# Load latex file into string for examination
dofile = open(name + '.tex', 'r')
text = dofile.read()
dofile.close()
latex_options = ''
if latex_program == 'pdflatex':
latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'
# Run latex
shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''
cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()
system(cmd_latex)
if 'idx{' in text:
cmd = 'makeindex %(name)s' % vars()
system(cmd)
if 'BIBFILE:' in text:
cmd = 'bibtex %(name)s' % vars()
system(cmd)
system(cmd_latex)
system(cmd_latex)
if latex_program == 'latex':
cmd = 'dvipdf %(name)s' % vars()
system(cmd)
# Could instead of dvipdf run the old-fashioned dvips and ps2pdf
if version in ('2up', 'A4-2up'):
# Use pdfnup to make two pages per sheet
cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()
system(cmd)
if postfix:
shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')
def html(name, options='', split=False):
"""
Make HTML file from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = u"doconce replace 'Figure' 'Рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'figure' 'рис.' %(name)s.html".encode('utf-8') % vars()
system(cmd)
cmd = u"doconce replace 'width=responsive' 'class=\"img-responsive\" style=\"max-width:600px; width:100%%;\"' %(name)s.html".encode('utf-8') % vars()
system(cmd)
if split:
cmd = 'doconce split_html %(name)s' % vars()
system(cmd)
for filename in glob.glob("._%(name)s*.html" % vars()):
if '000' not in filename: | system(cmd)
for filename in [name, '._%s000' % name]:
print(filename)
cmd = u"doconce replace 'Read' 'Перейти к первой главе' %s.html".encode('utf-8') % filename
system(cmd)
cmd = u"doconce subst '.*Next.*' '' %s.html".encode('utf-8') % filename
system(cmd)
def reveal_slides(name, options='', postfix='reveal', theme='darkgray'):
"""Make reveal.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['reveal'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s reveal --html_slide_theme=%(theme)s'
system(cmd)
def deck_slides(name, options='', postfix='deck', theme='sandstone.default'):
"""Make deck.js HTML5 slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
# Compile source
if '--pygments_html_style=' not in options:
from doconce.misc import recommended_html_styles_and_pygment_styles
combinations = recommended_html_styles_and_pygment_styles()
options += ' --pygments_html_style=%s' % combinations['deck'][theme][0]
if '--keep_pygments_html_bg' not in options:
options += ' --keep_pygments_html_bg'
options += ' --html_output="%(name)s-%(postfi)s'
cmd = 'doconce format html %(name)s %(options)s ' % vars()
system(cmd)
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def beamer_slides(name, options='', postfix='beamer', theme='red_shadow',
ptex2tex_envir='minted'):
"""Make latex beamer slides from the doconce file `name`."""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
system('rm -f %(name)s.aux' % vars())
# Compile source
shell_escape = '-shell-escape' if ptex2tex_envir == 'minted' else ''
cmd = 'doconce format pdflatex %(name)s %(options)s ' % vars()
system(cmd)
# Run latex
cmd = 'doconce ptex2tex %(name)s envir=%(ptex2tex_envir)s' % vars()
system(cmd)
cmd = 'doconce slides_beamer %(name)s --beamer_slide_theme=%(theme)s' % vars()
system(cmd)
cmd = 'pdflatex %(shell_escape)s %(name)s'
system(cmd)
system(cmd)
system('cp %(name)s.pdf %(name)s-%(postfi).pdf' % vars())
cmd = 'doconce slides_html %(name)s-%(postfi)s deck --html_slide_theme=%(theme)s'
system(cmd)
def sphinx(name, options='', dirname='sphinx-rootdir',
theme='pyramid', automake_sphinx_options='',
split=False):
"""
Make Sphinx HTML subdirectory from the doconce file `name`
(without any .do.txt extension).
"""
if name.endswith('.do.txt'):
name = name.replace('.do.txt', '')
if name.endswith('.do'):
name = name.replace('.do','')
# Compile source
cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()
system(cmd)
if split:
cmd = 'doconce split_rst %(name)s' % vars()
# Create sphinx directory
cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()
system(cmd)
# Compile | cmd = u"doconce replace '← Prev' '← Предыдущая глава' %s".encode('utf-8') % filename
system(cmd)
cmd = u"doconce replace 'Next →' ' Следующая глава →' %s".encode('utf-8') % filename | random_line_split |
init.rs | = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{:?}", format!(":{s}")))
.ok_or_else(|| {
RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array")
})?
.join(", "),
)
.map_err(Into::into)
}
fn snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&get_str(helper).to_snek_case())
.map_err(Into::into)
}
fn reverse_domain(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&util::reverse_domain(get_str(helper)))
.map_err(Into::into)
}
fn reverse_domain_snake_case(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
use heck::ToSnekCase as _;
out
.write(&util::reverse_domain(get_str(helper)).to_snek_case())
.map_err(Into::into)
}
fn app_root(ctx: &Context) -> Result<&str, RenderError> {
let app_root = ctx
.data()
.get("app")
.ok_or_else(|| RenderError::new("`app` missing from template data."))?
.get("root-dir")
.ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?;
app_root
.as_str()
.ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8."))
}
fn prefix_path(
helper: &Helper,
_: &Handlebars,
ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
util::prefix_path(app_root(ctx)?, get_str(helper))
.to_str()
.ok_or_else(|| {
RenderError::new(
"Either the `app.root-dir` or the specified path contained invalid UTF-8.",
)
})?,
)
.map_err(Into::into)
}
fn unprefix_ | path(
helpe | identifier_name |
|
init.rs | Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
} | #[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix |
pub fn exec(
target: Target,
wrapper: &TextWrapper, | random_line_split |
init.rs | Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
}
pub fn exec(
target: Target,
wrapper: &TextWrapper,
#[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) | build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix | {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx { | conditional_block |
init.rs | Option<(&mut AndroidEnv, &AndroidConfig)>,
) -> Result<()> {
if let Some((env, config)) = android {
for target in AndroidTarget::all().values() {
let config = target.generate_cargo_config(config, env)?;
let target_var_name = target.triple.replace('-', "_").to_uppercase();
if let Some(linker) = config.linker {
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_LINKER"),
linker.into(),
);
}
env.base.insert_env_var(
format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
config.rustflags.join(" ").into(),
);
}
}
let mut dot_cargo = dot_cargo::DotCargo::load(app)?;
// Mysteriously, builds that don't specify `--target` seem to fight over
// the build cache with builds that use `--target`! This means that
// alternating between i.e. `cargo run` and `cargo apple run` would
// result in clean builds being made each time you switched... which is
// pretty nightmarish. Specifying `build.target` in `.cargo/config`
// fortunately has the same effect as specifying `--target`, so now we can
// `cargo run` with peace of mind!
//
// This behavior could be explained here:
// https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags
dot_cargo.set_default_target(util::host_target_triple()?);
dot_cargo.write(app).map_err(Into::into)
}
pub fn exec(
target: Target,
wrapper: &TextWrapper,
#[allow(unused_variables)] non_interactive: bool,
#[allow(unused_variables)] reinstall_deps: bool,
skip_targets_install: bool,
) -> Result<App> {
let current_dir = current_dir()?;
let tauri_config = get_tauri_config(None)?;
let tauri_config_guard = tauri_config.lock().unwrap();
let tauri_config_ = tauri_config_guard.as_ref().unwrap();
let app = get_app(tauri_config_);
let (handlebars, mut map) = handlebars(&app);
let mut args = std::env::args_os();
let mut binary = args
.next()
.map(|bin| {
let path = PathBuf::from(&bin);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
return absolute_path.into();
}
bin
})
.unwrap_or_else(|| std::ffi::OsString::from("cargo"));
let mut build_args = Vec::new();
for arg in args {
let path = PathBuf::from(&arg);
if path.exists() {
let absolute_path = util::prefix_path(¤t_dir, path);
build_args.push(absolute_path.to_string_lossy().into_owned());
continue;
}
build_args.push(arg.to_string_lossy().into_owned());
if arg == "android" || arg == "ios" {
break;
}
}
build_args.push(target.ide_build_script_name().into());
let binary_path = PathBuf::from(&binary);
let bin_stem = binary_path.file_stem().unwrap().to_string_lossy();
let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap();
if r.is_match(&bin_stem) {
if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) {
let manager_stem = npm_execpath.file_stem().unwrap().to_os_string();
let is_npm = manager_stem == "npm-cli";
let is_npx = manager_stem == "npx-cli";
binary = if is_npm {
"npm".into()
} else if is_npx {
"npx".into()
} else {
manager_stem
};
if !(build_args.is_empty() || is_npx) {
// remove script path, we'll use `npm_lifecycle_event` instead
build_args.remove(0);
}
if is_npm {
build_args.insert(0, "--".into());
}
if !is_npx {
build_args.insert(0, var("npm_lifecycle_event").unwrap());
}
if is_npm {
build_args.insert(0, "run".into());
}
}
}
map.insert("tauri-binary", binary.to_string_lossy());
map.insert("tauri-binary-args", &build_args);
map.insert("tauri-binary-args-str", build_args.join(" "));
let app = match target {
// Generate Android Studio project
Target::Android => match AndroidEnv::new() {
Ok(_env) => {
let app = get_app(tauri_config_);
let (config, metadata) =
super::android::get_config(&app, tauri_config_, &Default::default());
map.insert("android", &config);
super::android::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
skip_targets_install,
)?;
app
}
Err(err) => {
if err.sdk_or_ndk_issue() {
Report::action_request(
" to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!",
err,
)
.print(wrapper);
app
} else {
return Err(err.into());
}
}
},
#[cfg(target_os = "macos")]
// Generate Xcode project
Target::Ios => {
let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default());
map.insert("apple", &config);
super::ios::project::gen(
&config,
&metadata,
(handlebars, map),
wrapper,
non_interactive,
reinstall_deps,
skip_targets_install,
)?;
app
}
};
Report::victory(
"Project generated successfully!",
"Make cool apps! 🌻 🐕 🎉",
)
.print(wrapper);
Ok(app)
}
fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) {
let mut h = Handlebars::new();
h.register_escape_fn(handlebars::no_escape);
h.register_helper("html-escape", Box::new(html_escape));
h.register_helper("join", Box::new(join));
h.register_helper("quote-and-join", Box::new(quote_and_join));
h.register_helper(
"quote-and-join-colon-prefix",
Box::new(quote_and_join_colon_prefix),
);
h.register_helper("snake-case", Box::new(snake_case));
h.register_helper("reverse-domain", Box::new(reverse_domain));
h.register_helper(
"reverse-domain-snake-case",
Box::new(reverse_domain_snake_case),
);
// don't mix these up or very bad things will happen to all of us
h.register_helper("prefix-path", Box::new(prefix_path));
h.register_helper("unprefix-path", Box::new(unprefix_path));
let mut map = JsonMap::default();
map.insert("app", app);
(h, map)
}
fn get_str<'a>(helper: &'a Helper) -> &'a str {
helper
.param(0)
.and_then(|v| v.value().as_str())
.unwrap_or("")
}
fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> {
helper.param(0).and_then(|v| {
v.value().as_array().and_then(|arr| {
arr
.iter()
.map(|val| {
val.as_str().map(
#[allow(clippy::redundant_closure)]
|s| formatter(s),
)
})
.collect()
})
})
}
fn html_escape(
helper: &Helper,
_: &Handlebars,
_ctx: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
| (
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| s.to_string())
.ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join(
helper: &Helper,
_: &Handlebars,
_: &Context,
_: &mut RenderContext,
out: &mut dyn Output,
) -> HelperResult {
out
.write(
&get_str_array(helper, |s| format!("{s:?}"))
.ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))?
.join(", "),
)
.map_err(Into::into)
}
fn quote_and_join_colon_prefix | .write(&handlebars::html_escape(get_str(helper)))
.map_err(Into::into)
}
fn join | identifier_body |
consensus.rs | 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet second hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet second hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargetting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargetting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a sythetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
(actual + (damp_factor - 1) * goal) / damp_factor
}
/// limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
/// Computes the proof-of-work difficulty that the next block should comply with.
/// Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
if header_version(height) < HeaderVersion(5) {
next_dma_difficulty(height, cursor)
} else {
next_wtema_difficulty(height, cursor)
}
}
/// Difficulty calculation based on a Damped Moving Average
/// of difficulty over a window of DMA_WINDOW blocks.
/// The corresponding timespan is calculated
/// by using the difference between the timestamps at the beginning
/// and the end of the window, with a damping toward the target block time.
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = HeaderInfo>,
{
// Create vector of difficulty data running from earliest
// to latest, and pad with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data length will be
// DMA_WINDOW + 1 (for initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// First, get the ratio of secondary PoW vs primary, skipping initial header
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);
// Get the timestamp delta across the window
let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DMA_WINDOW elements
let diff_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
// minimum difficulty avoids getting stuck due to dampening | let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) | random_line_split |
|
consensus.rs | _bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rational
/// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet second hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet second hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
pub const DMA_WINDOW: u64 = HOUR_HEIGHT;
/// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours
pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;
/// Average time span of the DMA difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for DMA difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for DMA difficulty adjustment
pub const DMA_DAMP_FACTOR: u64 = 3;
/// Dampening factor to use for AR scale calculation.
pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
/// Compute weight of a graph as number of siphash bits defining the graph
/// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year
pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
let mut xpr_edge_bits = edge_bits as u64;
let expiry_height = YEAR_HEIGHT;
if edge_bits == 31 && height >= expiry_height {
xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT);
}
// For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT
// 30 weeks after Jan 15, 2020 would be Aug 12, 2020
(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
}
/// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+
pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
/// Minimum difficulty, enforced in Damped Moving Average diff retargetting
/// avoids getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
/// Minimum scaling factor for AR pow, enforced in diff retargetting
/// avoids getting stuck when trying to increase ar_scale subject to dampening
pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR;
/// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Block hash, ZERO_HASH when this is a sythetic entry.
pub block_hash: Hash,
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl HeaderInfo {
/// Default constructor
pub fn new(
block_hash: Hash,
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
block_hash,
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: true,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
block_hash: ZERO_HASH,
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: true,
}
}
}
/// Move value linearly toward a goal
pub fn | damp | identifier_name |
|
consensus.rs | == 4 {
5_424_600
} else if num == 5 {
7_524_600
} else if num == 6 {
9_624_600
} else if num == 7 {
11_724_600
} else if num == 8 {
13_824_600
} else if num == 9 {
539_424_600
} else {
// shouldn't get here.
0
}
}
/// Calculate block reward based on height
pub fn calc_block_reward(height: u64) -> u64 {
if height == 0 {
// reward for genesis block
REWARD0
} else if height <= get_epoch_start(2) {
REWARD1
} else if height <= get_epoch_start(3) {
REWARD2
} else if height <= get_epoch_start(4) {
REWARD3
} else if height <= get_epoch_start(5) {
REWARD4
} else if height <= get_epoch_start(6) {
REWARD5
} else if height <= get_epoch_start(7) {
REWARD6
} else if height <= get_epoch_start(8) {
REWARD7
} else if height <= get_epoch_start(9) {
REWARD8
} else {
0 // no reward after this.
}
}
fn get_overage_offset_start_epoch(num: u64) -> u64 {
if num == 1 {
REWARD0
} else if num == 2 {
get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 3 {
get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0
} else if num == 4 {
get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 5 {
get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 6 {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 | else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rational
/// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40 | {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} | conditional_block |
consensus.rs | {
get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 7 {
get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 8 {
get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else if num == 9 {
get_epoch_start(9) * REWARD8
+ get_epoch_start(8) * REWARD7
+ get_epoch_start(7) * REWARD6
+ get_epoch_start(6) * REWARD5
+ get_epoch_start(5) * REWARD4
+ get_epoch_start(4) * REWARD3
+ get_epoch_start(3) * REWARD2
+ get_epoch_start(2) * REWARD1
+ REWARD0
} else {
// should not get here
1
}
}
/// Calculate block overage based on height and claimed BTCUtxos
pub fn calc_block_overage(height: u64) -> u64 {
if height == 0 {
0
} else if height <= get_epoch_start(2) {
(REWARD1 * height) + get_overage_offset_start_epoch(1)
} else if height <= get_epoch_start(3) {
(REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2)
} else if height <= get_epoch_start(4) {
(REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3)
} else if height <= get_epoch_start(5) {
(REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4)
} else if height <= get_epoch_start(6) {
(REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5)
} else if height <= get_epoch_start(7) {
(REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6)
} else if height <= get_epoch_start(8) {
(REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7)
} else if height <= get_epoch_start(9) {
(REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8)
} else {
// we exit here. Up to future generations to decide
// how to handle.
std::process::exit(0);
}
}
/// an hour in seconds
pub const HOUR_SEC: u64 = 60 * 60;
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// We use all C29d from the start
pub fn secondary_pow_ratio(_height: u64) -> u64 {
100
}
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckatoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 31;
/// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rational
/// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Default number of blocks in the past to determine the height where we request
/// a txhashset (and full blocks from). Needs to be long enough to not overlap with
/// a long reorg.
/// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h.
/// We add an order of magnitude to be safe and round to 2x24h of blocks to make it
/// easier to reason about.
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum
/// block size is around 1.5MB
/// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have -
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
/// `40_000 / 47 = 851` (txs per block)
///
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Testnet first hard fork height, set to happen around 2019-06-20
pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040;
/// Testnet second hard fork height, set to happen around 2019-12-19
pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080;
/// Testnet second hard fork height, set to happen around 2020-06-20
pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960;
/// Testnet second hard fork height, set to happen around 2020-12-8
pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240;
/// Fork every 3 blocks
pub const TESTING_HARD_FORK_INTERVAL: u64 = 3;
/// Compute possible block version at a given height,
/// currently no hard forks.
pub fn header_version(_height: u64) -> HeaderVersion {
HeaderVersion(1)
}
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool | {
version == header_version(height)
} | identifier_body |
|
lib.rs | let ty = try!(parser.parse_ty());
let value = try!(L::parse_arg_value(ecx, parser, ident));
Ok(ArgSpec {
ident: ident,
ty: ty,
value: value,
})
}
pub fn cxx_type<'a>(&self, handler: &'a Handler)
-> PResult<'a, Cow<'static, str>> {
types::convert_ty_to_cxx(handler, &self.ty)
}
}
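// A rough, self-contained sketch (not the crate's actual `types::convert_ty_to_cxx`)
// of the kind of Rust-to-C++ type mapping that `cxx_type` above relies on. The real
// implementation works on `ast::Ty` values and reports failures through the handler;
// the names and the exact mapping below are illustrative only.
#[cfg(test)]
mod ty_mapping_sketch {
    fn cxx_spelling(rust_ty: &str) -> Option<&'static str> {
        match rust_ty {
            "u8" => Some("uint8_t"),
            "u32" => Some("uint32_t"),
            "i64" => Some("int64_t"),
            "f64" => Some("double"),
            "bool" => Some("bool"),
            // Anything else (references, paths, generics) needs dedicated handling.
            _ => None,
        }
    }

    #[test]
    fn maps_primitive_types() {
        assert_eq!(cxx_spelling("u32"), Some("uint32_t"));
        assert_eq!(cxx_spelling("String"), None);
    }
}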
impl Function<Cxx> {
pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> {
let name = self.name.clone();
let args = self.args.iter().map(|arg| arg.value.clone()).collect();
Ok(ecx.expr_call_ident(self.span, name, args))
}
pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args = try!(self.cxx_args(ecx));
let signature = format!(
"{span}\nextern \"C\" {ret_ty} {name}({args})",
span = span_to_cpp_directive(ecx, self.span),
ret_ty = ret_ty,
name = self.name,
args = args);
let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body));
if self.ret_ty.is_some() {
body = format!("return ({{\n{};\n}});", body);
}
Ok(format!("{} {{\n{}\n}}\n", signature, body))
}
}
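// A minimal sketch (not part of the original crate) of the string shape produced by
// `cxx_code` above for a hypothetical symbol `rustcxx_cafebabe` taking `uint32_t a`
// and returning `uint32_t`. It only mirrors the `format!` calls, without the `#line`
// span directive, to show how the body ends up inside a GCC/Clang statement
// expression whose last expression becomes the return value.
#[cfg(test)]
mod cxx_wrapper_shape_sketch {
    #[test]
    fn wrapper_shape() {
        let ret_ty = "uint32_t";
        let name = "rustcxx_cafebabe";
        let args = "uint32_t a";
        let signature = format!("extern \"C\" {} {}({})", ret_ty, name, args);
        let body = format!("return ({{\n{};\n}});", "a * 2");
        let code = format!("{} {{\n{}\n}}\n", signature, body);
        assert!(code.starts_with("extern \"C\" uint32_t rustcxx_cafebabe(uint32_t a) {"));
        assert!(code.contains("return ({\na * 2;\n});"));
    }
}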
// Calling Rust from C++ is a bit trickier.
// We must declare the function before it can be used.
// However, C++ requires the declaration to appear outside the current function, and at that point
// we may be missing type definitions that are only in scope because of a namespace or some includes.
//
// For example :
// ```c++
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
//     uint32_t doubled = rust![(a: uint32_t) -> uint32_t {
//         a * 2
//     }];
//     printf("doubled: %u\n", doubled);
// }
// ```
//
// Declaring the extern function before the includes would not work, as uint32_t is not defined at
// this point. Finding the right place to declare it would be complicated and would almost require
// a full C++ parser.
//
// Instead we use an alternative approach. The function's symbol is declared with an opaque type at
// the top of the file. This does not require argument types to be in scope.
// When invoking the function, the symbol is first cast to a function pointer of the correct type.
// This way, the same typing context as in the original source is used.
//
// The example above would be translated into the following :
//
// ```c++
// struct rustcxx_XXXXXXXX;
// extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX;
//
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
//     uint32_t doubled = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a);
//     printf("doubled: %u\n", doubled);
// }
// ```
impl Function<Rust> {
pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> {
Ok(format!("struct {}; extern \"C\" {} {};", self.name, self.name, self.name))
}
pub fn cxx_call<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args_sig = try!(self.cxx_args(ecx));
let arg_separator = respan(DUMMY_SP, String::from(","));
let args_value = self.args.iter().map(|arg| {
arg.value.clone()
}).collect::<Vec<_>>().join(&arg_separator);
let cast_ty = format!("{} (*) ({})", ret_ty, args_sig);
let fn_ptr = format!("( ({}) &{} )", cast_ty, self.name);
let call = format!("{} ({})", fn_ptr, tokens_to_cpp(ecx, &args_value));
Ok(call)
}
pub fn item<'a>(&self, ecx: &'a ExtCtxt) -> P<ast::Item> {
let decl = self.fn_decl(ecx);
// Function has to be no_mangle, otherwise it can't be called from C++
let no_mangle = ecx.meta_word(self.span, token::intern("no_mangle").as_str());
// The function has to be exported or it would be optimized out by the compiler.
// The compiler already warns about this, but the warning is easy to miss, so turn it into a hard error.
let deny = ecx.meta_list(
self.span,
token::intern("deny").as_str(),
vec![ecx.meta_word(self.span, token::intern("private_no_mangle_fns").as_str())]);
let attrs = vec![
ecx.attribute(self.span, no_mangle),
ecx.attribute(self.span, deny),
];
let fn_item = ast::ItemKind::Fn(
decl, ast::Unsafety::Unsafe, ast::Constness::NotConst,
Abi::C, ast::Generics::default(), self.body.clone());
P(ast::Item {
ident: self.name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: fn_item,
vis: ast::Visibility::Public,
span: self.span,
})
}
}
/// Find and replace uses of rust![ .. ] in a token tree stream.
///
/// The callback is invoked for every use of the rust! macro and its result is used to replace it.
pub fn parse_rust_macro<F>(tts: &[TokenTree], f: &mut F) -> Vec<Spanned<String>>
where F: FnMut(Span, &[TokenTree]) -> Vec<Spanned<String>> {
let mut result = Vec::new();
// Iterate over the tokens with 3 tokens of lookahead.
let mut i = 0;
loop {
match (tts.get(i), tts.get(i+1), tts.get(i+2)) {
(Some(&TokenTree::Token(_, token::Ident(ident))),
Some(&TokenTree::Token(_, token::Not)),
Some(&TokenTree::Delimited(span, ref contents)))
if ident.name.to_string() == "rust" => {
i += 2;
result.extend(f(span, &contents.tts));
}
(Some(&TokenTree::Delimited(_, ref contents)), _, _) => {
// Recursively look into the token tree
result.push(respan(contents.open_span, token_to_string(&contents.open_token())));
result.extend(parse_rust_macro(&contents.tts, f));
result.push(respan(contents.close_span, token_to_string(&contents.close_token())));
}
(Some(&TokenTree::Token(span, ref tok)), _, _) => {
result.push(respan(span, token_to_string(tok)));
}
(Some(&TokenTree::Sequence(..)), _, _) => unimplemented!(),
(None, _, _) => break,
}
i += 1;
}
result
}
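// Illustrative example (not from the original source; the snippet is hypothetical):
// given the C++ token stream for
//     int x = rust![(a: i32) -> i32 { a + 1 }];
// the callback `f` receives the span and the tokens inside the rust![...] delimiters,
// and the Spanned<String> tokens it returns are spliced in place of the macro use.
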
/// Flatten a token tree stream.
///
/// Each token is stringified and paired with its span.
pub fn flatten_tts(tts: &[TokenTree]) -> Vec<Spanned<String>> {
tts.iter().flat_map(|tt| {
match tt {
&TokenTree::Token(span, ref tok) => {
vec![respan(span, token_to_string(tok))]
}
&TokenTree::Delimited(_, ref delimited) => {
let open = respan(delimited.open_span, token_to_string(&delimited.open_token()));
let close = respan(delimited.close_span, token_to_string(&delimited.close_token()));
iter::once(open)
.chain(flatten_tts(&delimited.tts))
.chain(iter::once(close))
.collect()
}
&TokenTree::Sequence(..) => unimplemented!()
}
}).collect()
}
/// Join tokens, using `#line` C preprocessor directives to maintain span
/// information.
pub fn tokens_to_cpp(ecx: &ExtCtxt, tokens: &[Spanned<String>]) -> String {
let codemap = ecx.parse_sess.codemap();
let mut last_pos = codemap.lookup_char_pos(DUMMY_SP.lo);
let mut column = 0;
let mut contents = String::new();
for token in tokens {
if token.span != DUMMY_SP {
let pos = codemap.lookup_char_pos(token.span.lo);
if pos.file.name == last_pos.file.name && pos.line == last_pos.line + 1 {
contents.push('\n');
column = 0;
} else if pos.file.name != last_pos.file.name || pos.line != last_pos.line {
contents.push('\n');
contents.push_str(&span_to_cpp_directive(ecx, token.span));
contents.push('\n');
column = 0;
} |
// Pad the code such that the token remains on the same column
while column < pos.col.0 { | random_line_split |
|
lib.rs | Eq) {
let mut tokens = Vec::new();
while !parser.check(&token::Comma) &&
!parser.check(&token::CloseDelim(token::Paren)) {
tokens.push(try!(parser.parse_token_tree()));
}
Ok(flatten_tts(&tokens))
} else {
Ok(vec![respan(ident.span, ident.node.to_string())])
}
}
}
impl Lang for Cxx {
type Body = Vec<TokenTree>;
type ArgValue = P<ast::Expr>;
fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> {
try!(parser.expect(&token::OpenDelim(token::Brace)));
parser.parse_seq_to_end(
&token::CloseDelim(token::Brace),
SeqSep::none(),
|parser| parser.parse_token_tree())
}
fn parse_arg_value<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>,
ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> {
if parser.eat(&token::Eq) {
parser.parse_expr()
} else {
Ok(ecx.expr_ident(ident.span, ident.node))
}
}
}
pub struct Function<L: Lang> {
pub span: Span,
pub name: ast::Ident,
pub ret_ty: Option<P<ast::Ty>>,
pub args: Vec<ArgSpec<L>>,
pub body: L::Body,
}
impl <L: Lang> Function<L> {
pub fn | <'a>(ecx: &ExtCtxt<'a>,
span: Span,
tts: &[TokenTree]) -> PResult<'a, Function<L>> {
let mut parser = ecx.new_parser_from_tts(tts);
let args = if parser.check(&token::OpenDelim(token::Paren)) {
Some(try!(Self::parse_args(ecx, &mut parser)))
} else {
None
};
let ret_ty = if args.is_some() && parser.check(&token::RArrow) {
Some(try!(Self::parse_ret_ty(&mut parser)))
} else {
None
};
let body = try!(L::parse_body(&mut parser));
let hash = {
let mut hasher = SipHasher::new();
tts_to_string(tts).hash(&mut hasher);
hasher.finish()
};
let name = ecx.ident_of(&format!("rustcxx_{:016x}", hash));
Ok(Function {
span: span,
name: name,
ret_ty: ret_ty,
args: args.unwrap_or_else(|| Vec::new()),
body: body,
})
}
fn parse_args<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, Vec<ArgSpec<L>>> {
parser.parse_unspanned_seq(
&token::OpenDelim(token::Paren),
&token::CloseDelim(token::Paren),
SeqSep::trailing_allowed(token::Comma),
|parser| ArgSpec::parse(ecx, parser))
}
fn parse_ret_ty<'a>(parser: &mut Parser<'a>) -> PResult<'a, P<ast::Ty>> {
try!(parser.expect(&token::RArrow));
parser.parse_ty()
}
pub fn fn_decl(&self, ecx: &ExtCtxt) -> P<ast::FnDecl> {
let args = self.args.iter().map(|arg| {
ecx.arg(arg.ident.span, arg.ident.node, arg.ty.clone())
}).collect();
let ret_ty = self.ret_ty.clone()
.map(ast::FunctionRetTy::Ty)
.unwrap_or(ast::FunctionRetTy::Default(DUMMY_SP));
P(ast::FnDecl {
inputs: args,
output: ret_ty,
variadic: false
})
}
pub fn foreign_item(&self, ecx: &ExtCtxt) -> ast::ForeignItem {
let fn_decl = self.fn_decl(ecx);
ast::ForeignItem {
id: DUMMY_NODE_ID,
ident: self.name,
attrs: Vec::new(),
node: ast::ForeignItemKind::Fn(fn_decl, ast::Generics::default()),
vis: ast::Visibility::Inherited,
span: self.span,
}
}
pub fn cxx_args<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let args = try!(self.args.iter().map(|arg| {
let ty = try!(arg.cxx_type(&ecx.parse_sess.span_diagnostic));
Ok(format!("{} const {}", ty, arg.ident.node))
}).collect::<PResult<Vec<String>>>());
Ok(args.join(", "))
}
pub fn cxx_ret_ty<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, Cow<'static, str>> {
self.ret_ty.as_ref().map(|ty| {
types::convert_ty_to_cxx(&ecx.parse_sess.span_diagnostic, &ty)
}).unwrap_or(Ok(Cow::from("void")))
}
}
#[derive(Debug)]
pub struct ArgSpec<L: Lang> {
pub ident: ast::SpannedIdent,
pub ty: P<ast::Ty>,
pub value: L::ArgValue,
}
impl <L: Lang> ArgSpec<L> {
pub fn parse<'a>(ecx: &ExtCtxt,
parser: &mut Parser<'a>) -> PResult<'a, ArgSpec<L>> {
let ident = {
let lo = parser.span.lo;
let ident = try!(parser.parse_ident());
let hi = parser.span.lo;
spanned(lo, hi, ident)
};
try!(parser.expect(&token::Colon));
let ty = try!(parser.parse_ty());
let value = try!(L::parse_arg_value(ecx, parser, ident));
Ok(ArgSpec {
ident: ident,
ty: ty,
value: value,
})
}
pub fn cxx_type<'a>(&self, handler: &'a Handler)
-> PResult<'a, Cow<'static, str>> {
types::convert_ty_to_cxx(handler, &self.ty)
}
}
impl Function<Cxx> {
pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> {
let name = self.name.clone();
let args = self.args.iter().map(|arg| arg.value.clone()).collect();
Ok(ecx.expr_call_ident(self.span, name, args))
}
pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> {
let ret_ty = try!(self.cxx_ret_ty(ecx));
let args = try!(self.cxx_args(ecx));
let signature = format!(
"{span}\nextern \"C\" {ret_ty} {name}({args})",
span = span_to_cpp_directive(ecx, self.span),
ret_ty = ret_ty,
name = self.name,
args = args);
let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body));
if self.ret_ty.is_some() {
body = format!("return ({{\n{};\n}});", body);
}
Ok(format!("{} {{\n{}\n}}\n", signature, body))
}
}
// Calling rust from C++ is a bit trickier.
// We must declare the function before it can be used.
// However C++ requires the function to be declared outside the current function, but then we may
// miss type definitions which are in scope due to being in a namespace, or some includes.
//
// For example :
// ```c++
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = rust![(a: uint32_t) -> uint32_t {
// a * 2
// }];
// printf("doubled: %u\n", doubled);
// }
// ```
//
// Declaring the extern function before the includes would not work, as uint32_t is not defined at
// this point. Finding the right place to declare it would be complicated and would almost require
// a full C++ parser.
//
// Instead we use an alternative approach. The function's symbol is declared with an opaque type at
// the top of the file. This does not require argument types to be in scope.
// When invoking the function, the symbol is first cast to a function pointer of the correct type.
// This way, the same typing context as in the original source is used.
//
// The example above would be translated into the following :
//
// ```c++
// struct rustcxx_XXXXXXXX;
// extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX;
//
// #include <stdint.h>
// #include <stdio.h>
//
// void foo() {
// uint32_t a = 3;
// uint32_t doubled = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a);
// printf("doubled: %u\n", doubled);
// }
// ```
impl Function<Rust> {
pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> {
Ok(format!("struct | parse | identifier_name |
server.rs | {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn | (json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
}
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json {
Json::Object(vec![
("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
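// For reference (shape derived from the impl above; the concrete values are hypothetical):
// an Event serializes to JSON of the form
//     {"changes": [[view_id, fields, insert, remove], ...], "session": "<peer addr>"}
// with one [view_id, fields, insert, remove] entry per changed view.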
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
// | from_json | identifier_name |
server.rs | {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn from_json(json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self |
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json {
Json::Object(vec![
("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
| {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
} | identifier_body |
server.rs | {
fn to_json(&self) -> Json {
match *self {
Value::Null => panic!("Cannot allow the client to see nulls"),
Value::Bool(bool) => Json::Boolean(bool),
Value::String(ref string) => Json::String(string.clone()),
Value::Float(float) => Json::F64(float),
Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()),
}
}
}
impl FromJson for Value {
fn from_json(json: &Json) -> Self {
match *json {
Json::Boolean(bool) => Value::Bool(bool),
Json::String(ref string) => Value::String(string.clone()),
Json::F64(float) => Value::Float(float),
Json::I64(int) => Value::Float(int as f64),
Json::U64(uint) => Value::Float(uint as f64),
Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()),
_ => panic!("Cannot decode {:?} as Value", json),
}
}
}
impl FromJson for String {
fn from_json(json: &Json) -> Self {
json.as_string().unwrap().to_owned()
}
}
impl<T: FromJson> FromJson for Vec<T> {
fn from_json(json: &Json) -> Self {
json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect()
}
}
#[derive(Debug, Clone)]
pub struct Event {
pub changes: Changes,
pub session: String,
}
impl ToJson for Event {
fn to_json(&self) -> Json { | ("changes".to_string(), Json::Array(
self.changes.iter().map(|&(ref view_id, ref view_changes)| {
Json::Array(vec![
view_id.to_json(),
view_changes.fields.to_json(),
view_changes.insert.to_json(),
view_changes.remove.to_json(),
])
}).collect()
)
),
("session".to_string(), self.session.to_json()),
].into_iter().collect())
}
}
impl FromJson for Event {
fn from_json(json: &Json) -> Self {
Event{
changes: json.as_object().unwrap()["changes"]
.as_array().unwrap().iter().map(|change| {
let change = change.as_array().unwrap();
assert_eq!(change.len(), 4);
let view_id = FromJson::from_json(&change[0]);
let fields = FromJson::from_json(&change[1]);
let insert = FromJson::from_json(&change[2]);
let remove = FromJson::from_json(&change[3]);
(view_id, Change{fields:fields, insert: insert, remove: remove})
}).collect(),
session: "".to_string(),
}
}
}
pub enum ServerEvent {
Change(Vec<u8>),
Sync((sender::Sender<WebSocketStream>,Option<String>)),
Terminate(Option<CloseData>),
}
// TODO holy crap why is everything blocking? this is a mess
pub fn server_events() -> mpsc::Receiver<ServerEvent> {
let (event_sender, event_receiver) = mpsc::channel();
thread::spawn(move || {
let server = websocket::Server::bind("0.0.0.0:2794").unwrap();
for connection in server {
let event_sender = event_sender.clone();
thread::spawn(move || {
// accept request
let request = connection.unwrap().read_request().unwrap();
request.validate().unwrap();
// Get the User ID from a cookie in the headers
let user_id = get_user_id(request.headers.get::<Cookie>());
let response = request.accept();
let (mut sender, mut receiver) = response.send().unwrap().split();
let ip = sender.get_mut().peer_addr().unwrap();
println!("Connection from {}", ip);
::std::io::stdout().flush().unwrap(); // TODO is this actually necessary?
// hand over sender
event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap();
// handle messages
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Binary(bytes) => {
event_sender.send(ServerEvent::Change(bytes)).unwrap();
}
Message::Close(_) => {
let ip_addr = format!("{}", ip);
println!("Received close message from {}.",ip_addr);
let close_message = CloseData{status_code: 0, reason: ip_addr};
event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap();
}
_ => println!("Unknown message: {:?}", message)
}
}
});
}
});
event_receiver
}
pub fn load(flow: &mut Flow, filename: &str) {
let mut events = OpenOptions::new().create(true).open(filename).unwrap();
let mut old_events = String::new();
events.read_to_string(&mut old_events).unwrap();
for line in old_events.lines() {
let json = Json::from_str(&line).unwrap();
let event: Event = FromJson::from_json(&json);
flow.quiesce(event.changes);
}
}
pub struct Server {
pub flow: Flow,
pub events: File,
pub senders: Vec<sender::Sender<WebSocketStream>>,
}
pub fn handle_event(server: &mut Server, event: Event, event_json: Json) {
server.events.write_all(format!("{}", event_json).as_bytes()).unwrap();
server.events.write_all("\n".as_bytes()).unwrap();
server.events.flush().unwrap();
let old_flow = time!("cloning", {
server.flow.clone()
});
server.flow.quiesce(event.changes);
let changes = time!("diffing", {
server.flow.changes_from(old_flow)
});
for sender in server.senders.iter_mut() {
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
}
}
pub fn run() {
let mut flow = Flow::new();
time!("reading saved state", {
load(&mut flow, "./bootstrap");
load(&mut flow, "./events");
});
let events = OpenOptions::new().write(true).append(true).open("./events").unwrap();
let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new();
let mut server = Server{flow: flow, events: events, senders: senders};
for server_event in server_events() {
match server_event {
ServerEvent::Sync((mut sender,user_id)) => {
// Add a session to the session table
let session_id = format!("{}", sender.get_mut().peer_addr().unwrap());
let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()),
Value::Float(1f64)
],None);
// If we have a user ID, add a mapping from the session ID to the user ID
add_session = match user_id {
Some(user_id) => {
client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()),
Value::String(user_id),
],Some(add_session))
},
None => add_session,
};
let json = add_session.to_json();
handle_event(&mut server, add_session, json);
let changes = server.flow.as_changes();
let text = format!("{}", Event{changes: changes, session: session_id}.to_json());
match sender.send_message(Message::Text(text)) {
Ok(_) => (),
Err(error) => println!("Send error: {}", error),
};
server.senders.push(sender)
}
ServerEvent::Change(input_bytes) => {
// TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113
let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]);
let cbor = decoder.items().next().unwrap().unwrap();
let json = cbor.to_json();
let event = FromJson::from_json(&json);
handle_event(&mut server, event, json);
}
ServerEvent::Terminate(m) => {
let terminate_ip = m.unwrap().reason;
println!("Closing connection from {}...",terminate_ip);
// Find the index of the connection's sender
let ip_ix = server.senders.iter_mut().position(|mut sender| {
let ip = format!("{}",sender.get_mut().peer_addr().unwrap());
ip == terminate_ip
});
// Properly clean up connections and the session table
match ip_ix {
Some(ix) => {
// Close the connection
let _ = server.senders[ix].send_message(Message::Close(None));
match server.senders[ix].get_mut().shutdown(Shutdown::Both) {
Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip),
Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e),
}
server.senders.remove(ix);
// Update | Json::Object(vec![ | random_line_split |
index.js | StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
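// Example (illustrative): formatTime(3725) returns "01:02:05".
// Note the hour component wraps at 60 because of the `% 60`, which is fine for
// typical video durations.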
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
console.log(`globalCond[this.id] is not exsited.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
// Check whether the file already exists
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
// Download to a file name with a .dl suffix, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
if( fs.statSync(filpath_dl).size <= 0 )
{
fs.unlinkSync(filpath_dl);
}
if(segment.key != null && segment.key.method != null)
{
// Standard AES decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
if (! /^http.*/.test(segment.key.uri)) {
key_uri = partent_uri + segment.key.uri;
}
else if(/^\/.*/.test(key_uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
key_uri = mes[0] + segment.key.uri;
}
else
{
key_uri = partent_uri + segment.key.uri;
}
}
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
function queue_callback(that, callback)
{
that.callback(callback);
}
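// Assumed usage (the queue wiring itself is not shown in this excerpt):
// QueueObject tasks are meant to be processed by an async task queue whose
// worker is queue_callback, e.g. with the async library:
//
//   const q = async.queue(queue_callback, 10); // hypothetical concurrency of 10
//   q.push(someQueueObject);
//
// Each task downloads/decrypts one segment and signals completion via _callback.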
async function startDownload(url, headers = null, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = [];
if(!fs.existsSync(dir))
{
fs.mkdirSync(dir, { recursive: true });
}
const response = await got(url, {headers: headers | , timeout: htt | identifier_name |
|
index.js | (Element.dir,{recursive :true})
}
var nIdx = configVideos.indexOf(Element);
if( nIdx > -1)
{
configVideos.splice(nIdx,1);
fs.writeFileSync(globalConfigVideoPath,JSON.stringify(configVideos));
}
event.sender.send("delvideo-reply",Element);
} catch (error) {
console.log(error)
}
}
});
});
ipcMain.on('StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
console.log(`globalCond[this.id] is not exsited.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
// Check whether the file already exists
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
// Download to a file name with a .dl suffix, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
if( fs.statSync(filpath_dl).size <= 0 )
{
fs.unlinkSync(filpath_dl);
}
if(segment.key != null && segment.key.method != null)
{
// Standard AES decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
if (! /^http.*/.test(segment.key.uri)) {
key_uri = partent_uri + segment.key.uri;
}
else if(/^\/.*/.test(key_uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
key_uri = mes[0] + segment.key.uri;
} | else
{
key_uri = partent_uri + segment.key.uri;
}
}
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
function queue_callback(that, callback)
{
| random_line_split |
|
index.js | } catch (error) {
console.log(error)
}
}
});
});
ipcMain.on('StartOrStop', function (event, arg) {
let id = Number.parseInt(arg);
if(globalCond[id] == null)
{
console.log("不存在此任务")
return;
}
globalCond[id] = !globalCond[id];
if(globalCond[id] == true)
{
configVideos.forEach(Element=>{
if(Element.id==id)
{
if(Element.isLiving == true)
{
startDownloadLive(Element.url, Element.headers, id);
}
else
{
startDownload(Element.url, Element.headers, id);
}
}
});
}
});
ipcMain.on('task-add', async function (event, arg, headers) {
let src = arg;
let _headers = {};
if(headers != '')
{
let __ = headers.match(/(.*?): ?(.*?)(\n|\r|$)/g);
__ && __.forEach((_)=>{
let ___ = _.match(/(.*?): ?(.*?)(\n|\r|$)/i);
___ && (_headers[___[1]] = ___[2]);
});
}
let mes = src.match(/^https?:\/\/[^/]*/);
let _hosts = '';
if(mes && mes.length >= 1)
{
_hosts = mes[0];
}
if(_headers['Origin'] == null && _headers['origin'] == null)
{
_headers['Origin'] = _hosts;
}
if(_headers['Referer'] == null && _headers['referer'] == null)
{
_headers['Referer'] = _hosts;
}
const response = await got(src, {headers: _headers, timeout: httpTimeout, https: {rejectUnauthorized: false}}).catch((error) => { console.log(error) })
{
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
if (response && response.body != null
&& response.body != '')
{
let parser = new Parser();
parser.push(response.body);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownload(src, _headers);
} else {
info = `直播资源解析成功,即将开始缓存...`;
startDownloadLive(src, _headers);
}
}
}
event.sender.send('task-add-reply', { code: code, message: info });
}
})
ipcMain.on('local-task-add', function (event, arg) {
let path = arg
let info = '';
let code = 0;
code = -1;
info = '解析资源失败!';
try{
const response = fs.readFileSync(path, 'utf-8')
if (response){
let parser = new Parser();
parser.push(response);
parser.end();
let count_seg = parser.manifest.segments.length;
if (count_seg > 0) {
code = 0;
if (parser.manifest.endList) {
let duration = 0;
parser.manifest.segments.forEach(segment => {
duration += segment.duration;
});
info = `资源解析成功,有 ${count_seg} 个片段,时长:${formatTime(duration)},即将开始缓存...`;
startDownloadLocal(path);
} else {
info = `解析资源失败!`;
}
}
}
}catch(error) {
console.log(error)
}
event.sender.send('task-add-reply', { code: code, message: info });
})
function formatTime(duration) {
let sec = Math.floor(duration % 60).toLocaleString();
let min = Math.floor(duration / 60 % 60).toLocaleString();
let hour = Math.floor(duration / 3600 % 60).toLocaleString();
if (sec.length != 2) sec = '0' + sec;
if (min.length != 2) min = '0' + min;
if (hour.length != 2) hour = '0' + hour;
return hour + ":" + min + ":" + sec;
}
class QueueObject {
constructor() {
this.segment = null;
this.url = '';
this.headers = '';
this.id = 0;
this.idx = 0;
this.dir = '';
this.then = this.catch = null;
}
async callback( _callback ) {
try{
if(!globalCond[this.id])
{
console.log(`globalCond[this.id] is not exsited.`);
return;
}
let partent_uri = this.url.replace(/([^\/]*\?.*$)|([^\/]*$)/g, '');
let segment = this.segment;
let uri_ts = '';
if (/^http.*/.test(segment.uri)) {
uri_ts = segment.uri;
}
else if(/^\/.*/.test(segment.uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
uri_ts = mes[0] + segment.uri;
}
else
{
uri_ts = partent_uri + segment.uri;
}
}
else
{
uri_ts = partent_uri + segment.uri;
}
let filename = `${ ((this.idx + 1) +'').padStart(6,'0')}.ts`;
let filpath = path.join(this.dir, filename);
let filpath_dl = path.join(this.dir, filename+".dl");
// console.log(`2 ${segment.uri}`,`${filename}`);
// Check whether the file already exists
for (let index = 0; index < 3 && !fs.existsSync(filpath); index++) {
// Download to a file name with a .dl suffix, then rename it once the download completes
let that = this;
await download (uri_ts, that.dir, {filename:filename + ".dl",timeout:httpTimeout,headers:that.headers}).catch((err)=>{
console.log(err)
if(fs.existsSync(filpath_dl)) fs.unlinkSync( filpath_dl);
});
if(!fs.existsSync(filpath_dl)) continue;
if( fs.statSync(filpath_dl).size <= 0 )
{
fs.unlinkSync(filpath_dl);
}
if(segment.key != null && segment.key.method != null)
{
// Standard AES decryption of the TS stream
let aes_path = path.join(this.dir, "aes.key" );
if(!fs.existsSync( aes_path ))
{
let key_uri = segment.key.uri;
if (! /^http.*/.test(segment.key.uri)) {
key_uri = partent_uri + segment.key.uri;
}
else if(/^\/.*/.test(key_uri))
{
let mes = this.url.match(/^https?:\/\/[^/]*/);
if(mes && mes.length >= 1)
{
key_uri = mes[0] + segment.key.uri;
}
else
{
key_uri = partent_uri + segment.key.uri;
}
}
await download (key_uri, that.dir, { filename: "aes.key" }).catch(console.error);
}
if(fs.existsSync( aes_path ))
{
try {
let key_ = fs.readFileSync( aes_path );
let iv_ = segment.key.iv != null ? Buffer.from(segment.key.iv.buffer)
:Buffer.from(that.idx.toString(16).padStart(32,'0') ,'hex' );
let cipher = crypto.createDecipheriv((segment.key.method+"-cbc").toLowerCase(), key_, iv_);
cipher.on('error', (error) => {console.log(error)});
let inputData = fs.readFileSync( filpath_dl );
let outputData = Buffer.concat([cipher.update(inputData),cipher.final()]);
fs.writeFileSync(filpath,outputData);
if(fs.existsSync(filpath_dl)) fs.unlinkSync(filpath_dl);
that.then && that.then();
} catch (error) {
console.log(error)
if(fs.existsSync( filpath_dl ))
fs.unlinkSync(filpath_dl);
}
return;
}
}
else
{
fs.renameSync(filpath_dl , filpath);
break;
}
}
if(fs.existsSync(filpath))
{
this.then && this.then();
}
else
{
this.catch && this.catch();
}
}
catch(e)
{
console.log(e);
}
finally
{
_callback();
}
}
}
function queue_callback(that, callback)
{
that.callback(callback);
}
async function startDownload(url, headers = null, nId = null) {
let id = nId == null ? new Date().getTime() : nId;
let dir = path.join(store.get('downloadPath'), '/'+id);
let filesegments = | [];
if(!fs.existsSync(dir))
{
fs | conditional_block |
|
state.go | // Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
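// Illustrative sketch (not part of the original file; the names below are hypothetical):
// a States value is usually obtained from a StateEngine and wired up with
// response handlers before its address is activated.
//
//	home := engine.AddState("home")
//	home.UseActivator(func() { /* render the home view */ })
//	home.UseDeactivator(func() { /* tear the home view down */ })
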
// State represents a single state with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an optional argument which also takes part in the validation process of a state, validating its rightness/wrongness for a given state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and a single address point, e.g. home or files
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivation response handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new activation response handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns a validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if atomic.LoadInt64(&s.active) == 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state; if the address is an empty string then the address receives the tag as its value. Remember the address is a single address point, e.g. home or files, and not the full extended address, e.g. .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag; the state address point is still used when matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state, i.e. with a state address of '.home.files' from its root, it will return the State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the final state of the engine, following any nested states, i.e. with the state address '.home.files', it will return the State(:files) object
func (se *StateEngine) | () States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders only the last state of the given state address (a partial view)
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders every state along the given state address (a full view)
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
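// Illustrative sketch (assumed usage, not from the original source): with a
// nested registration such as
//
//	home := engine.AddState("home")
//	home.Engine().AddState("files")
//
// engine.All(".home.files") activates "home" and then "files", while
// engine.Partial(".home.files") only activates the final "files" state.
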
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines whether it is a full view or a partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire active and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state, activate it since in doing so it will activate its own children; else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//be ready for the root. We call this here in case the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we don't lose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so | State | identifier_name |
state.go | // Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state of with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an option argument which also takes part in the validation process of a state in validating its rightness/wrongness from a giving state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and single address point .eg home or files ..etc
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivate respone handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new active respone handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns an validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active > 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 |
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state, if the address is a empty string then the address recieves the tag as its value, remember the address is a single address point .eg home or files and not the length of the extend address eg .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders the partial of the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the partial of the last state of the state address
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines wether its a full view or partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire acive and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//do be ready for the root. We call this here incase the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we dont loose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate | {
return
} | conditional_block |
state.go | // Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state of with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an option argument which also takes part in the validation process of a state in validating its rightness/wrongness from a giving state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and single address point .eg home or files ..etc
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivate respone handler
func (s *State) UseDeactivator(so StateResponse) States |
// UseActivator assigns the state a new active respone handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns an validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active > 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state, if the address is a empty string then the address recieves the tag as its value, remember the address is a single address point .eg home or files and not the length of the extend address eg .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders the partial of the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the partial of the last state of the state address
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() {
stop = true
})
}
}
func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines wether its a full view or partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire acive and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//do be ready for the root. We call this here incase the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we dont loose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate | {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
} | identifier_body |
state.go | // Tag() string
Engine() *StateEngine
Activate()
Deactivate()
UseActivator(StateResponse) States
UseDeactivator(StateResponse) States
OverrideValidator(StateValidator) States
acceptable(string, string) bool
}
// State represents a single state of with a specific tag and address
// where the address is a single piece 'home' item in the '.home.files' style state address
type State struct {
// atomic bit used to indicate active state or inactive state
active int64
// tag represent the identifier key used in a super-state
// tag string
// activator and deactivator provide actions to occur when the state is set to be active
activator StateResponse
deactivator StateResponse
// validator represents an option argument which also takes part in the validation process of a state in validating its rightness/wrongness from a giving state address
optionalValidator StateValidator
//internal engine that allows sub-states from a root state
engine *StateEngine
// the parent state this is connected to
// parent States
vo, ro, do sync.Mutex
}
// NewState builds a new state with a tag and single address point .eg home or files ..etc
func NewState() *State {
ns := State{}
ns.engine = BuildStateEngine(&ns)
return &ns
}
// Active returns true/false if this state is active
func (s *State) Active() bool {
return atomic.LoadInt64(&s.active) == 1
}
// Engine returns the internal nested StateEngine
func (s *State) Engine() *StateEngine {
return s.engine
}
// UseDeactivator assigns the state a new deactivate respone handler
func (s *State) UseDeactivator(so StateResponse) States {
s.do.Lock()
s.deactivator = so
s.do.Unlock()
return s
}
// UseActivator assigns the state a new active respone handler
func (s *State) UseActivator(so StateResponse) States {
s.ro.Lock()
s.activator = so
s.ro.Unlock()
return s
}
// OverrideValidator assigns an validator to perform custom matching of the state
func (s *State) OverrideValidator(so StateValidator) States {
s.vo.Lock()
s.optionalValidator = so
s.vo.Unlock()
return s
}
// Activate activates the state
func (s *State) Activate() {
if s.active > 1 {
return
}
atomic.StoreInt64(&s.active, 1)
subs := s.engine.diffOnlySubs()
//activate all the subroot states first so they can
//do any population they want
for _, ko := range subs {
ko.Activate()
}
s.ro.Lock()
if s.activator != nil {
s.activator()
}
s.ro.Unlock()
}
// Deactivate deactivates the state
func (s *State) Deactivate() {
if s.active < 1 {
return
}
atomic.StoreInt64(&s.active, 0)
s.do.Lock()
if s.deactivator != nil {
s.deactivator()
}
s.do.Unlock()
}
// acceptable checks if the state matches the current point
func (s *State) acceptable(addr string, point string) bool {
if s.optionalValidator == nil {
if addr == point {
return true
}
return false
}
s.vo.Lock()
state := s.optionalValidator(addr, point)
s.vo.Unlock()
return state
}
// StateEngine represents the engine that handles the state machine based operations for state-address based states
type StateEngine struct {
rw sync.RWMutex
states map[States]string
owner States
curr States
}
// NewStateEngine returns a new engine with a default empty state
func NewStateEngine() *StateEngine {
return BuildStateEngine(nil)
}
// BuildStateEngine returns a new StateEngine instance set with a particular state as its owner
func BuildStateEngine(s States) *StateEngine {
es := StateEngine{
states: make(map[States]string),
owner: s,
}
return &es
}
// AddState adds a new state into the engine with the tag used to identify the state, if the address is a empty string then the address recieves the tag as its value, remember the address is a single address point .eg home or files and not the length of the extend address eg .root.home.files
func (se *StateEngine) AddState(addr string) States {
sa := NewState()
se.add(addr, sa)
return sa
}
// UseState adds a state into the StateEngine with a specific tag, the state address point is still used in matching against it
func (se *StateEngine) UseState(addr string, s States) States {
if addr == "" {
addr = "."
}
se.add(addr, s)
return s
}
// ShallowState returns the current state of the engine and not the final state i.e with a state address of '.home.files' from its root, it will return State(:home) object
func (se *StateEngine) ShallowState() States {
if se.curr == nil {
return nil
}
return se.curr
}
// State returns the current last state of the engine with respect to any nested state that is with the state address of '.home.files', it will return State(:files) object
func (se *StateEngine) State() States {
co := se.curr
if co == nil {
// return se.owner
return nil
}
return co.Engine().State()
}
// Partial renders the partial of the last state of the state address
func (se *StateEngine) Partial(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, true)
}
// All renders the partial of the last state of the state address
func (se *StateEngine) All(addr string) error {
points, err := se.prepare(addr)
if err != nil {
return err
}
return se.trajectory(points, false)
}
// DeactivateAll deactivates all states connected to this engine
func (se *StateEngine) DeactivateAll() {
se.eachState(func(so States, tag string, _ func()) {
so.Deactivate()
})
}
func (se *StateEngine) eachState(fx func(States, string, func())) {
if fx == nil {
return
}
se.rw.RLock()
defer se.rw.RUnlock()
var stop bool
for so, addr := range se.states {
if stop {
break
}
fx(so, addr, func() { | func (se *StateEngine) getAddr(s States) string {
se.rw.RLock()
defer se.rw.RUnlock()
return se.states[s]
}
func (se *StateEngine) get(addr string) States {
se.rw.RLock()
defer se.rw.RUnlock()
for sm, ao := range se.states {
if ao != addr {
continue
}
return sm
}
return nil
}
func (se *StateEngine) add(addr string, s States) {
se.rw.RLock()
_, ok := se.states[s]
se.rw.RUnlock()
if ok {
return
}
se.rw.Lock()
se.states[s] = addr
se.rw.Unlock()
}
// trajectory is the real engine which checks the path and passes down the StateStat to the sub-states and determines wether its a full view or partial view
func (se *StateEngine) trajectory(points []string, partial bool) error {
subs, nosubs := se.diffSubs()
//are we out of points to walk through? if so then fire acive and tell others to be inactive
if len(points) < 1 {
//deactivate all the non-subroot states
for _, ko := range nosubs {
ko.Deactivate()
}
//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children
if se.owner != nil {
se.owner.Activate()
} else {
//activate all the subroot states first so they can
//do be ready for the root. We call this here incase the StateEngine has no root state
for _, ko := range subs {
ko.Activate()
}
}
return nil
}
//cache the first point so we dont loose it
point := points[0]
var state = se.get(point)
if state == nil {
// for _, ko := range nosubs {
// if sko.acceptable(se.getAddr(ko), point, so) {
// state = ko
// break
// }
// }
//
// if state == nil {
return ErrStateNotFound
// }
}
//set this state as the current active state
se.curr = state
//shift the list one more bit for the points
points = points[1:]
//we pass down the points since that will handle the loadup downwards
err := state.Engine().trajectory(points, partial)
if err != nil {
return err
}
if !partial {
// //activate all the subroot states first so they can
// //do any population they want
// for _, ko := range subs {
// ko.Activate(so)
| stop = true
})
}
}
| random_line_split |
mpsc.rs | let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)), // receiver was dropped
};
let mut shared = shared.borrow_mut();
match shared.capacity {
Some(capacity) if shared.buffer.len() == capacity => {
shared.blocked_senders.push_back(task::current());
Ok(AsyncSink::NotReady(msg))
}
_ => {
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
task.notify();
}
Ok(AsyncSink::Ready)
}
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender { shared: self.shared.clone() }
}
}
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn | (&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return,
};
// The number of existing `Weak` indicates if we are possibly the last
// `Sender`. If we are the last, we possibly must notify a blocked
// `Receiver`. `self.shared` is always one of the `Weak` to this shared
// data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
if Rc::weak_count(&shared) == 1 {
if let Some(task) = shared.borrow_mut().blocked_recv.take() {
// Wake up receiver as its stream has ended
task.notify();
}
}
}
}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Receiver<T> {
state: State<T>,
}
/// Possible states of a receiver. We're either Open (can receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
self.0.close();
}
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> | start_send | identifier_name |
mpsc.rs | receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
self.0.close();
}
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> {
inner: Receiver<Result<Item, Error>>,
_cancel_tx: oneshot::Sender<()>,
}
/// Type of future which `Executor` instances must be able to execute for `spawn`.
pub struct Execute<S: Stream> {
inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
cancel_rx: oneshot::Receiver<()>,
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
/// is full, then `stream` will stop progressing until more space is available.
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
where S: Stream,
E: Executor<Execute<S>>
{
let (cancel_tx, cancel_rx) = oneshot::channel();
let (tx, rx) = channel(buffer);
executor.execute(Execute {
inner: tx.send_all(resultstream::new(stream)),
cancel_rx: cancel_rx,
}).expect("failed to spawn stream");
SpawnHandle {
inner: rx,
_cancel_tx: cancel_tx,
}
}
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream, with unbounded buffering.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. | /// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available. | random_line_split |
|
mpsc.rs | let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)), // receiver was dropped
};
let mut shared = shared.borrow_mut();
match shared.capacity {
Some(capacity) if shared.buffer.len() == capacity => {
shared.blocked_senders.push_back(task::current());
Ok(AsyncSink::NotReady(msg))
}
_ => {
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
task.notify();
}
Ok(AsyncSink::Ready)
}
}
}
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender { shared: self.shared.clone() }
}
}
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
let shared = match self.shared.upgrade() {
Some(shared) => shared,
None => return,
};
// The number of existing `Weak` indicates if we are possibly the last
// `Sender`. If we are the last, we possibly must notify a blocked
// `Receiver`. `self.shared` is always one of the `Weak` to this shared
// data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
if Rc::weak_count(&shared) == 1 {
if let Some(task) = shared.borrow_mut().blocked_recv.take() {
// Wake up receiver as its stream has ended
task.notify();
}
}
}
}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
#[derive(Debug)]
pub struct Receiver<T> {
state: State<T>,
}
/// Possible states of a receiver. We're either Open (can receive more messages)
/// or we're closed with a list of messages we have left to receive.
#[derive(Debug)]
enum State<T> {
Open(Rc<RefCell<Shared<T>>>),
Closed(VecDeque<T>),
}
impl<T> Receiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) {
let (blockers, items) = match self.state {
State::Open(ref state) => {
let mut state = state.borrow_mut();
let items = mem::replace(&mut state.buffer, VecDeque::new());
let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
(blockers, items)
}
State::Closed(_) => return,
};
self.state = State::Closed(items);
for task in blockers {
task.notify();
}
}
}
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let me = match self.state {
State::Open(ref mut me) => me,
State::Closed(ref mut items) => {
return Ok(Async::Ready(items.pop_front()))
}
};
if let Some(shared) = Rc::get_mut(me) {
// All senders have been dropped, so drain the buffer and end the
// stream.
return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
}
let mut shared = me.borrow_mut();
if let Some(msg) = shared.buffer.pop_front() {
if let Some(task) = shared.blocked_senders.pop_front() {
drop(shared);
task.notify();
}
Ok(Async::Ready(Some(msg)))
} else {
shared.blocked_recv = Some(task::current());
Ok(Async::NotReady)
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
self.close();
}
}
/// The transmission end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedSender<T>(Sender<T>);
impl<T> Clone for UnboundedSender<T> {
fn clone(&self) -> Self {
UnboundedSender(self.0.clone())
}
}
impl<T> Sink for UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.start_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<'a, T> Sink for &'a UnboundedSender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
self.0.do_send(msg)
}
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), SendError<T>> {
Ok(Async::Ready(()))
}
}
impl<T> UnboundedSender<T> {
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
#[deprecated(note = "renamed to `unbounded_send`")]
#[doc(hidden)]
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
self.unbounded_send(msg)
}
/// Sends the provided message along this channel.
///
/// This is an unbounded sender, so this function differs from `Sink::send`
/// by ensuring the return type reflects that the channel is always ready to
/// receive messages.
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
let shared = match self.0.shared.upgrade() {
Some(shared) => shared,
None => return Err(SendError(msg)),
};
let mut shared = shared.borrow_mut();
shared.buffer.push_back(msg);
if let Some(task) = shared.blocked_recv.take() {
drop(shared);
task.notify();
}
Ok(())
}
}
/// The receiving end of an unbounded channel.
///
/// This is created by the `unbounded` function.
#[derive(Debug)]
pub struct UnboundedReceiver<T>(Receiver<T>);
impl<T> UnboundedReceiver<T> {
/// Closes the receiving half
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
pub fn close(&mut self) |
}
impl<T> Stream for UnboundedReceiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.0.poll()
}
}
/// Creates an unbounded in-memory channel with buffered storage.
///
/// Identical semantics to `channel`, except with no limit to buffer size.
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
let (send, recv) = channel_(None);
(UnboundedSender(send), UnboundedReceiver(recv))
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SendError")
.field(&"...")
.finish()
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "send failed because receiver is gone")
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> | {
self.0.close();
} | identifier_body |
drafts.go | toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
}
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
if err := os.MkdirAll("/tmp", 1777); err != nil {
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
| random_line_split |
||
drafts.go | [j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
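// Illustrative sketch (added note, not part of the original source): each RegexMappers
// entry is wrapped in a one-character delimiter, "<sep><pattern><sep><replacement><sep>",
// which mapURIs strips and splits. The registry and org names below are hypothetical.
//
//	loader := GithubBuildpackLoader{
//		RegexMappers: []string{
//			`|docker\.io/examplecorp/(.*)|ghcr.io/example-org/$1|`,
//		},
//	}
//	uris, _ := loader.mapURIs("docker.io/examplecorp/example-buildpack:1.2.3")
//	// uris[0] is always the original URI; uris[1] is the mapped candidate
//	// "ghcr.io/example-org/example-buildpack:1.2.3".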
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) | {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
} | identifier_body |
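// Illustrative sketch (added note, not part of the original source): the override env var
// is built from the *original* image's org and repo, upper-cased with dashes replaced by
// underscores. The names below are hypothetical.
//
//	// For org "example-org" and repo "example-buildpack", setting
//	//   BP_TOML_PATH_EXAMPLE_ORG_EXAMPLE_BUILDPACK=/nested/module
//	// makes LoadBuildpack fetch "/nested/module/buildpack.toml" instead of "/buildpack.toml".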
|
drafts.go | ("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
}
}
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) LoadPackages(imgUri string) (Package, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Package{}, fmt.Errorf("unable to map URIs\n%w", err)
}
for _, uri := range uris {
uriPattern := regexp.MustCompile(`.*\/(.*)\/(.*):(.*)`)
parts := uriPattern.FindStringSubmatch(uri)
if len(parts) != 4 {
return Package{}, fmt.Errorf("unable to parse %s, found %q", uri, parts)
}
org := parts[1]
repo := parts[2]
version := parts[3]
if regexp.MustCompile(`\d+\.\d+\.\d+`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
tomlBytes, err := g.fetchTOMLFile(org, repo, version, "/package.toml")
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Package{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
pkg := &Package{}
if err := toml.Unmarshal(tomlBytes, pkg); err != nil {
return Package{}, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return *pkg, nil
}
}
return Package{}, fmt.Errorf("unable to load package.toml for %s", imgUri)
}
func (g GithubBuildpackLoader) mapURIs(uri string) ([]string, error) {
possibilities := []string{uri}
for _, mapper := range g.RegexMappers {
if len(mapper) <= 3 {
continue
}
splitCh := string(mapper[0])
parts := strings.SplitN(mapper[1:len(mapper)-1], splitCh, 2)
expr, err := regexp.Compile(parts[0])
if err != nil {
return []string{}, fmt.Errorf("unable to parse regex %s\n%w", mapper, err)
}
possibilities = append(possibilities, expr.ReplaceAllString(uri, parts[1]))
}
return possibilities, nil
}
func (g GithubBuildpackLoader) mapBuildpackTOMLPath(org, repo string) ([]string, error) {
paths := []string{
"/buildpack.toml",
}
org = strings.ToUpper(strings.ReplaceAll(org, "-", "_"))
repo = strings.ToUpper(strings.ReplaceAll(repo, "-", "_"))
if p, found := os.LookupEnv(fmt.Sprintf("BP_TOML_PATH_%s_%s", org, repo)); found {
if !strings.HasSuffix(p, "/buildpack.toml") {
p = fmt.Sprintf("%s/buildpack.toml", p)
}
return []string{p}, nil
}
return paths, nil
}
func (g GithubBuildpackLoader) fetchTOMLFile(org, repo, version, path string) ([]byte, error) {
fmt.Println("Fetching from org:", org, "repo:", repo, "version:", version, "path:", path)
body, _, err := g.GithubClient.Repositories.DownloadContents(
context.Background(),
org,
repo,
path,
&github.RepositoryContentGetOptions{Ref: version})
if err != nil {
return []byte{}, fmt.Errorf("unable to download file\n%w", err)
}
buf := &bytes.Buffer{}
_, err = io.Copy(buf, body)
if err != nil {
return []byte{}, fmt.Errorf("unable to read downloaded file\n%w", err)
}
return buf.Bytes(), nil
}
type RegistryBuildpackLoader struct {
GCRToken string
}
func (r RegistryBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
fmt.Println("Loading buildpack info from:", uri)
bp, err := r.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
return buildpacks, nil
}
func (r RegistryBuildpackLoader) LoadBuildpack(uri string) (Buildpack, error) {
if err := os.MkdirAll("/tmp", 01777); err != nil { // 01777 (octal): world-writable with sticky bit, conventional for /tmp
return Buildpack{}, fmt.Errorf("unable to create /tmp\n%w", err)
}
tarFile, err := ioutil.TempFile("/tmp", "tarfiles")
if err != nil {
return Buildpack{}, fmt.Errorf("unable to create tempfile\n%w", err)
}
defer os.Remove(tarFile.Name())
err = r.loadBuildpackImage(uri, tarFile)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load %s\n%w", uri, err)
}
_, err = tarFile.Seek(0, 0)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to reset file pointer\n%w", err)
}
bpTOML, err := readBuildpackTOML(tarFile)
if err != nil {
return Buildpack{}, err
}
bp, err := loadBuildpackTOML(bpTOML)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
return *bp, nil
}
func (r RegistryBuildpackLoader) | loadBuildpackImage | identifier_name |
|
drafts.go | , err := template.New("draft").Parse(templateContents)
if err != nil {
return fmt.Errorf("unable to parse template %q\n%w", templateContents, err)
}
err = tmpl.Execute(output, context)
if err != nil {
return fmt.Errorf("unable to execute template %q\n%w", templateContents, err)
}
return nil
}
func (d Drafter) CreatePayload(inputs actions.Inputs, buildpackPath string) (Payload, error) {
release := Release{
ID: inputs["release_id"],
Name: inputs["release_name"],
Body: inputs["release_body"],
Tag: inputs["release_tag_name"],
}
builder, err := loadBuilderTOML(buildpackPath)
if err != nil {
return Payload{}, err
}
if builder != nil {
bps, err := d.Loader.LoadBuildpacks(builder.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
PrimaryBuildpack: Buildpack{},
Builder: *builder,
NestedBuildpacks: bps,
Release: release,
}, nil
}
bp, err := loadBuildpackTOMLFromFile(buildpackPath)
if err != nil {
return Payload{}, err
}
pkg, err := loadPackage(buildpackPath)
if err != nil {
return Payload{}, err
}
if bp != nil && pkg == nil { // component
return Payload{
PrimaryBuildpack: *bp,
Release: release,
}, nil
} else if bp != nil && pkg != nil { // composite
bps, err := d.Loader.LoadBuildpacks(pkg.Flatten())
if err != nil {
return Payload{}, fmt.Errorf("unable to load buildpacks\n%w", err)
}
return Payload{
NestedBuildpacks: bps,
PrimaryBuildpack: *bp,
Release: release,
}, nil
}
return Payload{}, fmt.Errorf("unable to generate payload, need buildpack.toml or buildpack.toml + package.toml or builder.toml")
}
func loadBuildpackTOMLFromFile(buildpackPath string) (*Buildpack, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "buildpack.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read buildpack toml\n%w", err)
}
return loadBuildpackTOML(rawTOML)
}
func loadBuildpackTOML(TOML []byte) (*Buildpack, error) {
bp := &Buildpack{}
if err := toml.Unmarshal(TOML, bp); err != nil {
return nil, fmt.Errorf("unable to parse buildpack TOML\n%w", err)
}
sort.Slice(bp.Stacks, func(i, j int) bool {
return strings.ToLower(bp.Stacks[i].ID) < strings.ToLower(bp.Stacks[j].ID)
})
if deps, found := bp.Metadata["dependencies"]; found {
if depList, ok := deps.([]map[string]interface{}); ok {
for _, dep := range depList {
bpDep := libpak.BuildpackDependency{
ID: asString(dep, "id"),
Name: asString(dep, "name"),
Version: asString(dep, "version"),
URI: asString(dep, "uri"),
SHA256: asString(dep, "sha256"),
PURL: asString(dep, "purl"),
}
if stacks, ok := dep["stacks"].([]interface{}); ok {
for _, stack := range stacks {
if stack, ok := stack.(string); ok {
bpDep.Stacks = append(bpDep.Stacks, stack)
}
}
}
if cpes, ok := dep["cpes"].([]interface{}); ok {
for _, cpe := range cpes {
if cpe, ok := cpe.(string); ok {
bpDep.CPEs = append(bpDep.CPEs, cpe)
}
}
}
if licenses, ok := dep["licenses"].([]map[string]interface{}); ok {
for _, license := range licenses {
bpDep.Licenses = append(bpDep.Licenses, libpak.BuildpackDependencyLicense{
Type: asString(license, "type"),
URI: asString(license, "uri"),
})
}
}
bp.Dependencies = append(bp.Dependencies, bpDep)
}
} else {
return nil, fmt.Errorf("unable to read dependencies from %v", bp.Metadata)
}
sort.Slice(bp.Dependencies, func(i, j int) bool {
return strings.ToLower(bp.Dependencies[i].Name) < strings.ToLower(bp.Dependencies[j].Name)
})
}
return bp, nil
}
func asString(m map[string]interface{}, key string) string {
if tmp, ok := m[key].(string); ok {
return tmp
}
return ""
}
func loadPackage(buildpackPath string) (*Package, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "package.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read package toml\n%w", err)
}
pkg := &Package{}
if err := toml.Unmarshal(rawTOML, pkg); err != nil {
return nil, fmt.Errorf("unable to parse package TOML\n%w", err)
}
return pkg, nil
}
func loadBuilderTOML(buildpackPath string) (*Builder, error) {
rawTOML, err := ioutil.ReadFile(filepath.Join(buildpackPath, "builder.toml"))
if err != nil && os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("unable to read builder toml\n%w", err)
}
builder := &Builder{}
if err := toml.Unmarshal(rawTOML, builder); err != nil {
return nil, fmt.Errorf("unable to parse builder TOML\n%w", err)
}
return builder, nil
}
type GithubBuildpackLoader struct {
GithubClient *github.Client
RegexMappers []string
}
func (g GithubBuildpackLoader) LoadBuildpacks(uris []string) ([]Buildpack, error) {
buildpacks := []Buildpack{}
for _, uri := range uris {
bp, err := g.LoadBuildpack(uri)
if err != nil {
return []Buildpack{}, fmt.Errorf("unable to process %s\n%w", uri, err)
}
buildpacks = append(buildpacks, bp)
}
sort.Slice(buildpacks, func(i, j int) bool {
return strings.ToLower(buildpacks[i].Info.Name) < strings.ToLower(buildpacks[j].Info.Name)
})
return buildpacks, nil
}
func (g GithubBuildpackLoader) LoadBuildpack(imgUri string) (Buildpack, error) {
uris, err := g.mapURIs(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map URIs\n%w", err)
}
origOrg, origRepo, _, err := parseRepoOrgVersionFromImageUri(imgUri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse original image uri\n%w", err)
}
for _, uri := range uris {
org, repo, version, err := parseRepoOrgVersionFromImageUri(uri)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to parse image uri\n%w", err)
}
paths, err := g.mapBuildpackTOMLPath(origOrg, origRepo)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to map buildpack toml path\n%w", err)
}
if regexp.MustCompile(`^\d+\.\d+\.\d+$`).MatchString(version) {
version = fmt.Sprintf("v%s", version)
}
for _, path := range paths | }
}
return Buildpack{}, fmt.Errorf("unable to load buildpack.toml for %s", imgUri)
}
func (g Github | {
tomlBytes, err := g.fetchTOMLFile(org, repo, version, path)
if err != nil {
var apiErr *github.ErrorResponse
if errors.As(err, &apiErr) && apiErr.Response.StatusCode == 404 {
fmt.Println("skipping 404", apiErr)
continue
}
return Buildpack{}, fmt.Errorf("unable to fetch toml\n%w", err)
}
if len(tomlBytes) > 0 {
bp, err := loadBuildpackTOML(tomlBytes)
if err != nil {
return Buildpack{}, fmt.Errorf("unable to load buildpack toml from image\n%w", err)
}
bp.Info.Version = version
return *bp, nil
} | conditional_block |
utility.js | ;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all requests except GET
SendAjaxRequestWithObject = (ApiUrl, RequestType, Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Show error of Ajax Response Message
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
| return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Day Names With IDs
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wed | return false;
}
| conditional_block |
utility.js | },
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all requests except GET
SendAjaxRequestWithObject = (ApiUrl, RequestType, Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Show error of Ajax Response Message
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pa | ableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Day Names With IDs
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case " | ger(t | identifier_name |
utility.js | ;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all requests except GET
SendAjaxRequestWithObject = (ApiUrl, RequestType, Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response; | },
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Show error of Ajax Response Message
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to)
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Day Names With IDs
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wednesday | random_line_split |
|
utility.js | },
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Send AjaxRequest With Object/parameter
//...................................................Recommended for all requests except GET
SendAjaxRequestWithObject = (ApiUrl, RequestType, Object) => {
let ajaxConfig = {
url: ApiBaseUrl(ApiUrl),
type: RequestType,
contentType: 'application/json; charset=utf-8',
dataType: "json",
data: JSON.stringify(Object),
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader('Authorization', 'Bearer ' + localStorage.getItem("UserToken"));
},
success: (response) => {
return response;
},
error: (XMLHttpRequest, textStatus, errorThrown) => {
return errorThrown;
}
}
$.ajax(ajaxConfig)
}
//............................................................................Section Textbox/Labels/div handling
//..................return Textbox Value
let getTextboxValue = (selector) => {
return $.trim($(selector).val())
}
//..................Put Value into Textbox
let putValueIntoTextbox = (selector, value) => {
$(selector).val(value)
}
//..................Reset Textbox value
let resetTextbox = (selector) => {
$(selector).val('')
}
//..................Reset Textbox value with value assign by user
let resetTextboxWithCustomValue = (selector, value) => {
$(selector).val(value)
}
//...........................................select Value From dropdown
let selectValueFromDropdown = (selector) => {
return $(`${selector} option:selected`).val()
}
//..................Change Main Page Title
let CurrentPageTitle = (title, openPage) => {
$("#currentPage").text(title)
if (openPage != null) {
$("#slash").removeAttr("hidden")
$("#openPage").text(openPage)
}
}
//..................Put Value into Label
let putValueIntoLabels = (selector, value) => {
$(selector).text(value)
}
//..................Get Value From Label
let getValueFromLabel = (selector) => {
return $(selector).text()
}
//............................................................................Section Notifications
//..................Show Error Message
let showErrorMessage = (selector, message) => {
$(selector).removeAttr('hidden')
$(selector).text(message)
setTimeout(() => {
$(selector).text('');
$(selector).attr('hidden', 'hidden');
}, 2000);
}
//..................Show error of Ajax Response Message
let RedirectAccordingToError = (errorThrown) => {
if (errorThrown == "Unauthorized") {
window.location = "/Account/Accounts/UnAuthorized"
} else if (errorThrown == "Not Found") {
window.location = "/Account/Accounts/Login"
} else if (errorThrown == "Forbidden") {
window.location = "/Account/Accounts/UnAuthorized"
}
else {
console.log(errorThrown);
}
}
//............................................................................Section Date Time
//................. Datepicker format like Dec 17 2019
$('.datepickerDEC')[0] && $('.datepickerDEC').each(function () {
$('.datepickerDEC').datepicker({
disableTouchKeyboard: true,
autoclose: true,
format: "M dd yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepicker1')[1] && $('.datepicker1').each(function () {
$('.datepicker1').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//................. Datepicker format like Dec 2019
$('.datepickerGetMonth')[1] && $('.datepickerGetMonth').each(function () {
$('.datepickerGetMonth').datepicker({
disableTouchKeyboard: true,
autoclose: true,
minViewMode: 'months',
viewMode: 'months',
pickTime: false,
format: "M yyyy"
});
});
//.............Get Time Using Moment
let momentTime = (value) => {
return moment(value).format("LT")
}
//.............Get date Using MomentFormat
let momentDate = (value) => {
return moment(value).format("MMM DD YYYY")
}
//.............moment human readable Date
let momentHumanDate = (value) => {
return moment(value).fromNow();
}
//.............Get date Using MomentFormat like Dec
let momentMonth = (value) => {
return moment(value).format("MMM")
}
//..............................Get coming month with year
let commingMonth = () => {
var now = new Date();
if (now.getMonth() == 11) {
var current = new Date(now.getFullYear() + 1, 0, 1);
return current;
} else {
var current = new Date(now.getFullYear(), now.getMonth() + 1, 1);
return current;
}
}
//............................................................................Section Custom validations (Email/Mobile)
//...............Email Format Verification
let email_validate = (value) => {
let email = value
var regexPattern = new RegExp(/^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$/); // regular expression pattern
let isValid = regexPattern.test(email);
if (!isValid) {
return false;
}
return true;
}
//..........................Contact validation
let contact_validate = (value) => {
let contact = value
var regexPattern = new RegExp(/^\d{4}[- ]?\d{7}$/); // regular expression pattern
let isValid = regexPattern.test(contact);
if (!isValid) {
return false;
}
return true;
}
//............................................................................Section Table Pagination
// .................... pager
function Pager(tableName, itemsPerPage) {
|
rows[i].style.display = 'none';
else
rows[i].style.display = '';
}
}
this.showPage = function (pageNumber) {
if (!this.inited) {
alert("not inited");
return;
}
var oldPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
oldPageAnchor.className = 'page-item';
this.currentPage = pageNumber;
var newPageAnchor = document.getElementById(`pg${tableName}` + this.currentPage);
newPageAnchor.className = 'page-item active';
var from = (pageNumber - 1) * itemsPerPage + 1;
var to = from + itemsPerPage - 1;
this.showRecords(from, to);
}
this.prev = function () {
if (this.currentPage > 1)
this.showPage(this.currentPage - 1);
}
this.next = function () {
if (this.currentPage < this.pages) {
this.showPage(this.currentPage + 1);
}
}
this.init = function () {
var rows = document.getElementById(tableName).rows;
var records = (rows.length - 1);
this.pages = Math.ceil(records / itemsPerPage);
this.inited = true;
}
this.showPageNav = function (pagerName, positionId) {
if (!this.inited) {
alert("not inited");
return;
}
var element = document.getElementById(positionId);
var pagerHtml = `<li class="page-pre" title="Previous" style="cursor:pointer" onclick="${pagerName}.prev();">
<a class="page-link">‹</a>
</li>
`
for (var page = 1; page <= this.pages; page++)
pagerHtml += `<li style="cursor:pointer" class="page-number" id="pg${tableName}${page}" onclick="${pagerName}.showPage(${page});"><a class="page-link">${page}</a></li>`
pagerHtml += `<li style="cursor:pointer" title="Next" class="page-next" onclick="${pagerName}.next();">
<a class="page-link">›</a>
</li >`
element.innerHTML = pagerHtml;
}
}
//...........................................................................Section Day Names With IDs
//.................get today by using day id
let getToday = (day) => {
switch (day) {
case 1:
return "Monday"
break;
case 2:
return "Tuesday"
break;
case 3:
return "Wednesday"
break;
case 4:
return "Thursday"
break;
case 5:
return "Friday"
break;
case 6:
return "Saturday";
break;
case 7:
return "Sunday"
break;
}
}
//................................ReturnDayId by Using Day Name
let getDayID = (dayName) => {
switch (dayName) {
case "monday":
return 1
break;
case "tuesday":
return 2
break;
case "wed |
this.tableName = tableName;
this.itemsPerPage = itemsPerPage;
this.currentPage = 1;
this.pages = 0;
this.inited = false;
this.showRecords = function (from, to) {
var rows = document.getElementById(tableName).rows;
// i starts from 1 to skip table header row
for (var i = 1; i <= rows.length - 1; i++) {
if (i < from || i > to) | identifier_body |
Server.go | s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
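// Illustrative usage sketch (added note, not part of the original source): the returned
// *time.Ticker fires until it is stopped, so callers should keep the handle and call Stop
// once the repeating task is no longer needed; the *time.Timer returned by
// ScheduleDelayedTask can likewise be cancelled with Stop before it fires.
//
//	ticker := server.ScheduleRepeatingTask(func() {
//		server.GetLogger().Info("periodic level save")
//	}, 5*time.Minute)
//	defer ticker.Stop()
//
//	server.ScheduleDelayedTask(func() {
//		server.GetLogger().Info("delayed announcement")
//	}, 30*time.Second)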
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
server.SessionManager.RemoveMinecraftSession(session)
if !ok {
return
}
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{online.GetPlayer().GetName(): online.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
} | if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData() | random_line_split |
|
Server.go | var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFromRaw(packet, addr)
q.DecodeServer()
server.QueryManager.HandleQuery(q)
return
}
text.DefaultLogger.Debug("Unhandled raw packet:", hex.EncodeToString(packet))
}
// HandleDisconnect handles a disconnection from a session.
func (server *Server) HandleDisconnect(s *server.Session) {
text.DefaultLogger.Debug(s, "disconnected!")
session, ok := server.SessionManager.GetSessionByRakNetSession(s)
if !ok {
return
}
server.SessionManager.RemoveMinecraftSession(session)
if session.GetPlayer().Dimension != nil {
for _, online := range server.SessionManager.GetSessions() {
online.SendPlayerList(data.ListTypeRemove, map[string]protocol.PlayerListEntry{session.GetPlayer().GetName(): session.GetPlayer()})
}
session.GetPlayer().DespawnFromAll()
session.GetPlayer().Close()
server.BroadcastMessage(text.Yellow+session.GetDisplayName(), "has left the server")
}
}
// GeneratePongData generates the raknet pong data for the UnconnectedPong RakNet packet.
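// The semicolon-separated fields below are, in order: "MCPE", the MOTD, the
// protocol number, the network version, the current session count, the maximum
// player count, the RakLib server ID, the engine name and the game mode ("Creative").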
func (server *Server) GeneratePongData() string {
return fmt.Sprint("MCPE;", server.GetMotd(), ";", info.LatestProtocol, ";", server.GetMinecraftNetworkVersion(), ";", server.SessionManager.GetSessionCount(), ";", server.Config.MaximumPlayers, ";", server.NetworkAdapter.GetRakLibManager().ServerId, ";", server.GetEngineName(), ";Creative;")
}
// Tick ticks the entire server. (Levels, scheduler, raknet server etc.)
// Internal. Not to be used by plugins.
func (server *Server) Tick() {
if !server.isRunning {
return
}
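// refresh the query result and pong data once every 20 ticks (i.e. once per
// second at the usual 20 ticks-per-second rate)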
if server.tick%20 == 0 {
server.QueryManager.SetQueryResult(server.GenerateQueryResult())
server.NetworkAdapter.GetRakLibManager().PongData = server.GeneratePongData()
}
for _, session := range server.SessionManager.GetSessions() {
session.Tick()
}
for range server.LevelManager.GetLevels() {
//level.Tick()
}
server.tick++
}
func (server *Server) GetCommandManager() command {
return server.CommandManager
}
**/
func (server *Server) | Tick | identifier_name |
|
Server.go |
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level |
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C {
fn()
}
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFrom | {
return server.levels
} | identifier_body |
Server.go |
tick int64
logger *utils.Logger
pluginPath string
playersPath string
themePath string
worldsPath string
behaviorPacksPath string
resourecePackPath string
serverPath string
config *utils.Config
network networkapi.NetWork
ip string
port int
raknetServer raknetapi.RaknetServer
pluginLoader *plugin.PluginLoader
levels map[string]*level.Level
defaultLevel string
}
func New(serverPath string, config *utils.Config, logger *utils.Logger) *Server {
server := new(Server)
api.SetServer(server)
server.serverPath = serverPath
server.config = config
server.logger = logger
server.pluginPath = serverPath + "/plugins/"
server.themePath = serverPath + "/theme/"
server.playersPath = serverPath + "/players/"
server.worldsPath = serverPath + "/worlds/"
server.resourecePackPath = serverPath + "/resoureces_pack/"
server.levels = map[string]*level.Level{}
server.ip = config.Get("server-ip", "0.0.0.0").(string)
server.port = config.Get("server-port", 19132).(int)
server.pluginLoader = plugin.NewLoader(server.pluginPath)
//s.LevelManager = level.NewManager(serverPath)
//server.CommandManager = commands.NewManager()
//server.CommandReader = command.NewCommandReader(os.Stdin)
/*
s.SessionManager = packet.NewSessionManager()
s.NetworkAdapter = packet.NewNetworkAdapter(s.SessionManager)
s.NetworkAdapter.GetRakLibManager().PongData = s.GeneratePongData()
s.NetworkAdapter.GetRakLibManager().RawPacketFunction = s.HandleRaw
s.NetworkAdapter.GetRakLibManager().DisconnectFunction = s.HandleDisconnect
s.RegisterDefaultProtocols()
s.PackManager = packs.NewManager(serverPath)
s.PermissionManager = permissions.NewManager()
s.PluginManager = NewPluginManager(s)
s.QueryManager = query.NewManager()
if config.UseEncryption {
var curve = elliptic.P384()
var err error
s.privateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
text.DefaultLogger.LogError(err)
if !curve.IsOnCurve(s.privateKey.X, s.privateKey.Y) {
text.DefaultLogger.Error("Invalid private key generated")
}
var token = make([]byte, 128)
rand.Read(token)
s.token = token
}
return s
*/
return server
}
func (server *Server) IsRunning() bool {
return server.isRunning
}
func (server *Server) Start() {
if server.isRunning {
panic("The server has beem started!")
}
server.mkdirs()
server.logger.Info("MineGopher " + ServerVersion + ", running on " + server.serverPath)
server.isRunning = true
server.defaultLevel = server.config.Get("level-name", "world").(string)
dl := level.NewLevel(server.worldsPath+server.defaultLevel, server.defaultLevel)
server.levels[server.defaultLevel] = dl
server.network = network.New()
server.network.SetName(server.config.Get("motd", "MineGopher Server For Minecraft: PE").(string))
server.raknetServer = raknet.New(server.GetIp(), server.GetPort())
server.raknetServer.Start()
server.logger.Info("RakNetServer Listen " + server.GetIp() + ":" + strconv.Itoa(server.GetPort()))
if server.config.Get("webconsole", true).(bool) {
webconsole.Start()
}
server.pluginLoader.LoadPlugins()
server.config.Save()
}
func (server *Server) Shutdown() {
if !server.isRunning {
return
}
for _, l := range server.levels {
l.GetDimension().Save()
}
server.logger.Info("Server stopped.")
server.isRunning = false
server.logger.Close()
}
func (server *Server) GetConfig() *utils.Config {
return server.config
}
func (server *Server) GetAllPlayer() map[string]player.Player {
return server.raknetServer.GetPlayers()
}
func (server *Server) GetNetWork() networkapi.NetWork {
return server.network
}
func (server *Server) GetRaknetServer() raknetapi.RaknetServer {
return server.raknetServer
}
func (server *Server) GetName() string {
return ServerName
}
func (server *Server) GetLogger() *utils.Logger {
return server.logger
}
func (server *Server) GetLevels() map[string]*level.Level {
return server.levels
}
func (server *Server) GetLevel(name string) *level.Level {
return server.levels[name]
}
func (server *Server) GetDefaultLevel() *level.Level {
return server.GetLevel(server.defaultLevel)
}
func (server *Server) GetPath() string {
return server.serverPath
}
func (server *Server) ScheduleRepeatingTask(fn func(), d time.Duration) *time.Ticker {
ticker := time.NewTicker(d)
go func() {
for range ticker.C |
}()
return ticker
}
func (server *Server) ScheduleDelayedTask(fn func(), d time.Duration) *time.Timer {
return time.AfterFunc(d, fn)
}
/*
// GetMinecraftVersion returns the latest Minecraft game version.
// It is prefixed with a 'v', for example: "v1.2.10.1"
func (server *Server) GetMinecraftVersion() string {
return info.LatestGameVersion
}
// GetMinecraftNetworkVersion returns the latest Minecraft network version.
// For example: "1.2.10.1"
func (server *Server) GetMinecraftNetworkVersion() string {
return info.LatestGameVersionNetwork
}
// HasPermission returns if the server has a given permission.
// Always returns true to satisfy the ICommandSender interface.
func (server *Server) HasPermission(string) bool {
return true
}
// SendMessage sends a message to the server to satisfy the ICommandSender interface.
func (server *Server) SendMessage(message ...interface{}) {
text.DefaultLogger.Notice(message)
}
// GetEngineName returns 'minegopher'.
func (server *Server) GetEngineName() string {
return minegopherName
}
// GetName returns the LAN name of the server specified in the configuration.
func (server *Server) GetName() string {
return server.Config.ServerName
}
// GetPort returns the port of the server specified in the configuration.
func (server *Server) GetPort() uint16 {
return server.Config.ServerPort
}
// GetAddress returns the IP address specified in the configuration.
func (server *Server) GetAddress() string {
return server.Config.ServerIp
}
// GetMaximumPlayers returns the maximum amount of players on the server.
func (server *Server) GetMaximumPlayers() uint {
return server.Config.MaximumPlayers
}
// Returns the Message Of The Day of the server.
func (server *Server) GetMotd() string {
return server.Config.ServerMotd
}
// GetCurrentTick returns the current tick the server is on.
func (server *Server) GetCurrentTick() int64 {
return server.tick
}
// BroadcastMessageTo broadcasts a message to all receivers.
func (server *Server) BroadcastMessageTo(receivers []*packet.MinecraftSession, message ...interface{}) {
for _, session := range receivers {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// Broadcast broadcasts a message to all players and the console in the server.
func (server *Server) BroadcastMessage(message ...interface{}) {
for _, session := range server.SessionManager.GetSessions() {
session.SendMessage(message)
}
text.DefaultLogger.LogChat(message)
}
// GetPrivateKey returns the ECDSA private key of the server.
func (server *Server) GetPrivateKey() *ecdsa.PrivateKey {
return server.privateKey
}
// GetPublicKey returns the ECDSA public key of the private key of the server.
func (server *Server) GetPublicKey() *ecdsa.PublicKey {
return &server.privateKey.PublicKey
}
// GetServerToken returns the server token byte sequence.
func (server *Server) GetServerToken() []byte {
return server.token
}
// GenerateQueryResult returns the query data of the server in a byte array.
func (server *Server) GenerateQueryResult() query.Result {
var plugs []string
for _, plug := range server.PluginManager.GetPlugins() {
plugs = append(plugs, plug.GetName()+" v"+plug.GetVersion())
}
var ps []string
for name := range server.SessionManager.GetSessions() {
ps = append(ps, name)
}
var result = query.Result{
MOTD: server.GetMotd(),
ListPlugins: server.Config.AllowPluginQuery,
PluginNames: plugs,
PlayerNames: ps,
GameMode: "SMP",
Version: server.GetMinecraftVersion(),
ServerEngine: server.GetEngineName(),
WorldName: server.LevelManager.GetDefaultLevel().GetName(),
OnlinePlayers: int(server.SessionManager.GetSessionCount()),
MaximumPlayers: int(server.Config.MaximumPlayers),
Whitelist: "off",
Port: server.Config.ServerPort,
Address: server.Config.ServerIp,
}
return result
}
// HandleRaw handles a raw packet, for instance a query packet.
func (server *Server) HandleRaw(packet []byte, addr *net2.UDPAddr) {
if string(packet[0:2]) == string(query.Header) {
if !server.Config.AllowQuery {
return
}
var q = query.NewFrom | {
fn()
} | conditional_block |
stopwords.rs | "])),
("cross-platform", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])),
("portable", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])),
("winapi", Some(&["target", "windows", "gnu", "x86", "i686", "64", "pc"])),
("windows", Some(&["gnu"])),
("compile-time", Some(&["time"])),
("constant-time", Some(&["time"])),
("real-time", Some(&["time"])),
("time-series", Some(&["time"])),
("execution", Some(&["time"])),
("iterator", Some(&["window", "windows"])),
("buffer", Some(&["window", "windows"])),
("sliding", Some(&["window", "windows"])),
("web", Some(&["windows", "macos", "mac", "osx", "linux"])),
("error", Some(&["color"])),
("pretty-print", Some(&["color"])),
("pretty-printer", Some(&["color"])),
("ios", Some(&["core"])),
("macos", Some(&["core"])),
("osx", Some(&["core"])),
("mac", Some(&["core"])),
("module", Some(&["core"])),
("wasm", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std", "deploy"])),
("javascript", Some(&["embedded", "no-std", "no_std", "feature:no_std"])),
("webassembly", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std"])),
("deep-learning", Some(&["math", "statistics"])),
("machine-learning", Some(&["math", "statistics"])),
("neural-networks", Some(&["math", "statistics", "network"])),
("neural", Some(&["network"])),
("fantasy", Some(&["console"])),
("learning", Some(&["network"])),
("safe", Some(&["network"])),
("database", Some(&["embedded"])),
("robotics", Some(&["localization"])),
("thread", Some(&["storage"])),
("exchange", Some(&["twitch", "animation"])),
("animation", Some(&["kraken"])),
("bitcoin", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "rpc", "websocket"])),
("solana", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "cryptographic", "gfx", "sdk"])),
("exonum", Some(&["http", "day", "database", "key-value", "network", "wasm", "client"])),
("blockchain", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])),
("cryptocurrencies", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])),
("cryptocurrency", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket", "twitch"])),
("ethereum", Some(&["http", "day", "nosql", "eth", "log", "generic", "network", "wasm", "key-value", "orm", "client", "database", "secp256k1", "websocket", "parity"])),
("iter", Some(&["math"])),
("ethernet", Some(&["eth"])),
("macro", Some(&["no-std", "no_std", "feature:no_std"])),
("macros", Some(&["no-std", "no_std", "feature:no_std"])),
("embedded", Some(&["no-std", "no_std", "feature:no_std"])),
("arm", Some(&["no-std", "no_std", "feature:no_std"])),
("float", Some(&["math"])),
("c64", Some(&["terminal", "core"])),
("emulator", Some(&["6502", "core", "gpu", "color", "timer"])),
("garbage", Some(&["tracing"])),
("terminal", Some(&["math", "emulator"])),
("terminal-emulator", Some(&["math", "emulator"])),
("editor", Some(&["terminal"])),
("build", Some(&["logic"])), // confuses categorization
("messaging", Some(&["matrix"])), // confuses categorization
("led", Some(&["matrix"])), // confuses categorization
("rgb", Some(&["matrix"])), // confuses categorization
("chat", Some(&["matrix"])), // confuses categorization
("math", Some(&["num", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization
("mathematics", Some(&["num", "numeric", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization
("cuda", Some(&["nvidia"])), // confuses categorization
("subcommand", Some(&["plugin"])),
("lint", Some(&["plugin"])),
("email", Some(&["validator", "validation"])),
("e-mail", Some(&["validator", "validation"])),
("template", Some(&["derive"])),
("dsl", Some(&["template"])),
("syn", Some(&["nom"])),
("cargo", Some(&["plugin"])),
("git", Some(&["terminal"])),
("nzxt", Some(&["kraken"])),
("wide", Some(&["windows", "win32"])),
("i18n", Some(&["text", "format", "message", "json", "ffi"])),
("l10n", Some(&["text", "format", "message", "json", "ffi"])),
("unicode", Some(&["text"])),
("parity", Some(&["fun", "backend"])),
("secp256k1", Some(&["fun", "backend", "alloc", "ecc"])),
("font", Some(&["text", "bitmap"])),
("freetype", Some(&["text", "bitmap"])),
("tex", Some(&["font"])),
("regex", Some(&["text", "linear", "time", "search"])),
("language", Some(&["server"])),
("server", Some(&["files"])),
("medical", Some(&["image"])),
("social", Some(&["media"])),
("codegen", Some(&["backend"])),
("game", Some(&["simulator", "simulation"])),
("vkontakte", Some(&["vk"])),
("vulkan", Some(&["vk"])),
("2d", Some(&["path", "paths"])),
("video", Some(&["audio"])), // have to pick one…
("sound", Some(&["3d", "windows"])),
("memory", Some(&["os", "system", "storage"])), // too generic
("data-structure", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("crypto", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("macro", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("parser", Some(&["no-std", "no_std", "game"])), // it's a nice feature, but not defining one
("cryptography", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one
("websocket", Some(&["http", "cli", "tokio", "client", "io", "network", "servo", "web"])), // there's a separate category for it
("rest", Some(&["api"])),
("cargo-subcommand", None),
("substrate", None),
("twitch", Some(&["kraken"])),
("chess", Some(&["bot"])),
("lichess", Some(&["bot"])),
("nftables", Some(&["nft"])),
("placeholder", None), // spam
("reserved", None), // spam
("name-squatting", None), // spam
("parked", None), // spam
("squatting", None), // spam
("malware", None), // spam
("unfinished", None), // spam | ].iter().copied().collect(); | random_line_split |
|
rasterbackend.rs | : u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
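// (an orthographic projection sized by zoom and the aspect ratio, combined with
// a look_at view from view_pos towards the origin; the up vector is (0, 0, -1))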
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
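// the second pass draws the perpendicular grid lines by rotating the view
// projection 90 degrees around the z axis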
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
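// e.g. for a 100 px wide picture, x = 0 maps to -1.0 and x = 99 maps to 0.98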
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
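// a non-positive edge function for all three edges means the point lies inside
// (or on the border of) the projected triangle for the winding order used here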
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
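// the weights w0 + w1 + w2 sum to 1 and are used below to interpolate the
// vertex positions in screen space and world space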
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x, | w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
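// simple Phong-style specular term with a fixed shininess exponent of 16 and
// a specular strength of 0.7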
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic
| random_line_split |
|
rasterbackend.rs | : u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self |
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic | {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
} | identifier_body |
rasterbackend.rs | : u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn | (
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
);
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic
| render | identifier_name |
rasterbackend.rs | : u32,
aspect_ratio: f32,
}
impl RasterBackend {
pub fn new(width: u32, height: u32) -> Self {
Self {
render_options: RenderOptions::default(),
width,
height,
aspect_ratio: width as f32 / height as f32,
}
}
fn view_projection(&self, zoom: f32) -> Mat4 {
// calculate view projection matrix
let proj = glm::ortho(
zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5 * self.aspect_ratio,
-zoom * 0.5,
zoom * 0.5,
0.0,
1.0,
);
let view = glm::look_at(
&self.render_options.view_pos,
&Vec3::new(0.0, 0.0, 0.0),
&Vec3::new(0.0, 0.0, -1.0),
);
proj * view
}
pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) {
let aabb = AABB::from_iterable(mesh);
let vp = self.view_projection(1.0);
// scale the model such that it fills the entire canvas
(aabb, scale_for_unitsize(&vp, &aabb))
}
pub fn render(
&self,
mesh: impl IntoIterator<Item = Triangle> + Copy,
model_scale: f32,
aabb: &AABB,
timeout: Option<Duration>,
) -> Picture {
let start_time = Instant::now();
let mut pic = Picture::new(self.width, self.height);
let mut zbuf = ZBuffer::new(self.width, self.height);
let mut scaled_aabb = *aabb;
pic.fill(&(&self.render_options.background_color).into());
let vp = self.view_projection(self.render_options.zoom);
// calculate transforms taking the new model scale into account
let model = Mat4::identity()
.append_translation(&-aabb.center())
.append_scaling(model_scale);
let mvp = vp * model;
// let the AABB match the transformed model
scaled_aabb.apply_transform(&model);
// eye normal pointing towards the camera in world space
let eye_normal = self.render_options.view_pos.normalize();
// grid in x and y direction
if self.render_options.grid_visible {
draw_grid(
&mut pic,
&vp,
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
draw_grid(
&mut pic,
&(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))),
scaled_aabb.lower.z,
&self.render_options.grid_color,
aabb.size(),
model_scale,
);
}
for t in mesh {
// timed out?
if let Some(timeout) = timeout {
let dt = Instant::now() - start_time;
if dt > timeout {
// abort
println!("... timeout!");
return pic;
}
}
let normal = -t.normal;
// backface culling
if glm::dot(&eye_normal, &normal) < 0.0 {
continue;
}
let v = &t.vertices;
let v0 = matmul(&mvp, &v[0]);
let v1 = matmul(&mvp, &v[1]);
let v2 = matmul(&mvp, &v[2]);
let v0m = matmul(&model, &v[0]);
let v1m = matmul(&model, &v[1]);
let v2m = matmul(&model, &v[2]);
// triangle bounding box
let min_x = v0.x.min(v1.x).min(v2.x);
let min_y = v0.y.min(v1.y).min(v2.y);
let max_x = v0.x.max(v1.x).max(v2.x);
let max_y = v0.y.max(v1.y).max(v2.y);
// triangle bounding box in screen space
let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32);
let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32);
let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32));
let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32));
for y in smin_y..=smax_y {
for x in smin_x..=smax_x {
// normalized screen coordinates [-1,1]
let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5);
let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5);
let p = Vec2::new(nx, ny);
let p0 = v0.xy();
let p1 = v1.xy();
let p2 = v2.xy();
let inside =
edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0;
if inside {
// calculate barycentric coordinates
let area = edge_fn(&p0, &p1, &p2);
let w0 = edge_fn(&p1, &p2, &p) / area;
let w1 = edge_fn(&p2, &p0, &p) / area;
let w2 = edge_fn(&p0, &p1, &p) / area;
// fragment position in screen space
let frag_pos = Vec3::new(
w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y,
w0 * v0.z + w1 * v1.z + w2 * v2.z,
);
// fragment position in world space
let fp = Vec3::new(
w0 * v0m.x + w1 * v1m.x + w2 * v2m.x,
w0 * v0m.y + w1 * v1m.y + w2 * v2m.y,
w0 * v0m.z + w1 * v1m.z + w2 * v2m.z,
);
//let fp = matmul(&mvp_inv, &frag_pos);
if zbuf.test_and_set(x, y, frag_pos.z) {
// calculate lighting
let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space)
let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space)
let reflect_dir = glm::reflect_vec(&-light_normal, &normal);
// diffuse
let diff_color =
glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0;
// specular
let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7)
* self.render_options.light_color;
// merge
let mut color = self.render_options.ambient_color + diff_color + spec_color;
color.x *= self.render_options.model_color.x;
color.y *= self.render_options.model_color.y;
color.z *= self.render_options.model_color.z;
pic.set(x, y, &(color.x, color.y, color.z, 1.0).into());
}
}
}
}
}
if self.render_options.draw_size_hint |
pic.stroke_string(
margin,
pic.height() - text_size - margin,
&text,
text_size as f32,
&"FFFFFFFF".into(),
);
}
pic | {
let margin = 3;
let text_to_height_ratio = 16;
let text = format!(
"{}x{}x{}",
aabb.size().x as i32,
aabb.size().y as i32,
aabb.size().z as i32
);
let text_size = pic.height() / text_to_height_ratio;
pic.fill_rect(
0,
pic.height() as i32 - (text_size + margin * 2) as i32,
pic.width() as i32,
pic.height() as i32,
&"333333FF".into(),
); | conditional_block |
mod.rs | inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
// delete_range deletes all the entries in [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn | (&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
// for map state to work properly, but useful for all the states, so the bloom filters get
// populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
// TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
// replaces the __DUMMY from above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
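// flush memtables first so all writes made so far are persisted to SST files
// before the checkpoint hard-links them into the checkpoint directory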
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
}
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
| create_column_family | identifier_name |
mod.rs | inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
// delete_range deletes all the entries in [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
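// Shared column-family options: a fixed-size prefix extractor sized to the item key + namespace so prefix seeks and bloom filters work per (key, namespace).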
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
// for map state to work properly, but useful for all the states, so the bloom filters get
// populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
// TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
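// reopen every column family recorded in the existing DB, or just "default" for a brand-new one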
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
// replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
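// flush memtables first so the checkpoint captures every write made so far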
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() |
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap | {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
} | conditional_block |
mod.rs | inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
// delete_range deletes all the entries in [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
|
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
// TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
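// restoring = copying the checkpoint's files into an empty live directory and opening it as a regular DB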
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
// replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() {
// TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)?
}
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
| {
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
// for map state to work properly, but useful for all the states, so the bloom filters get
// populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
} | identifier_body |
mod.rs | #[inline]
fn put(
&self,
cf_name: impl AsRef<str>,
key: impl AsRef<[u8]>,
value: impl AsRef<[u8]>,
) -> Result<()> {
let cf = self.get_cf_handle(cf_name)?;
Ok(self
.db()
.put_cf_opt(cf, key, value, &default_write_opts())?)
}
#[inline]
fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> {
let cf = self.get_cf_handle(cf)?;
Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?)
}
fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> {
let prefix = prefix.as_ref();
let cf_name = cf.as_ref();
let cf = self.get_cf_handle(cf_name)?;
// NOTE: this only works assuming the column family is lexicographically ordered (which is
// the default, so we don't explicitly set it, see Options::set_comparator)
let start = prefix;
// delete_range deletes all the entries in [start, end) range, so we can just increment the
// least significant byte of the prefix
let mut end = start.to_vec();
*end.last_mut()
.expect("unreachable, the empty case is covered a few lines above") += 1;
let mut wb = WriteBatch::default();
wb.delete_range_cf(cf, start, &end);
self.db().write_opt(wb, &default_write_opts())?;
Ok(())
}
#[inline]
fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> {
let cf = self.get_cf_handle(cf.as_ref())?;
Ok(self.db().get_pinned_cf(cf, key)?.is_some())
}
fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> {
if self.db().cf_handle(cf_name).is_none() {
self.db_mut().create_cf(cf_name, &opts)?;
}
Ok(())
}
}
fn common_options<IK, N>() -> Options
where
IK: Metakey,
N: Metakey,
{
let prefix_size = IK::SIZE + N::SIZE;
let mut opts = Options::default();
// for map state to work properly, but useful for all the states, so the bloom filters get
// populated
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize));
opts
}
impl Backend for Rocks {
fn name(&self) -> &str {
self.name.as_str()
}
fn create(path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
let mut opts = Options::default();
opts.create_if_missing(true);
let path: PathBuf = path.into();
if !path.exists() {
fs::create_dir_all(&path)?;
}
let column_families: HashSet<String> = match DB::list_cf(&opts, &path) {
Ok(cfs) => cfs.into_iter().filter(|n| n != "default").collect(),
// TODO: possibly platform-dependent error message check
Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(),
Err(e) => return Err(e.into()),
};
let cfds = if !column_families.is_empty() {
column_families
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name, Options::default()))
.collect()
} else {
vec![ColumnFamilyDescriptor::new("default", Options::default())]
};
Ok(Rocks {
inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?),
restored: false,
name,
})
}
fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self>
where
Self: Sized,
{
fs::create_dir_all(live_path)?;
ensure!(
fs::read_dir(live_path)?.next().is_none(),
RocksRestoreDirNotEmpty { dir: &(*live_path) }
);
let mut target_path: PathBuf = live_path.into();
target_path.push("__DUMMY"); // the file name is replaced inside the loop below
for entry in fs::read_dir(checkpoint_path)? {
let entry = entry?;
assert!(entry
.file_type()
.expect("Cannot read entry metadata")
.is_file());
let source_path = entry.path();
// replaces the __DUMMY placeholder set above the loop
target_path.set_file_name(
source_path
.file_name()
.expect("directory entry with no name?"),
);
fs::copy(&source_path, &target_path)?;
}
Rocks::create(live_path, name).map(|mut r| {
r.restored = true;
r
})
}
fn was_restored(&self) -> bool {
self.restored
}
fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> {
let db = self.db();
db.flush()?;
let checkpointer = Checkpoint::new(db)?;
if checkpoint_path.exists() { | }
checkpointer.create_checkpoint(checkpoint_path)?;
Ok(())
}
fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ValueState<T>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<MapState<K, V>, IK, N>,
) {
handle.registered = true;
let opts = common_options::<IK, N>();
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<VecState<T>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<ReducerState<T, F>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("reducer_merge", reducer_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>(
&'s self,
handle: &'s mut Handle<AggregatorState<A>, IK, N>,
) {
handle.registered = true;
let mut opts = common_options::<IK, N>();
let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone());
opts.set_merge_operator_associative("aggregator_merge", aggregator_merge);
self.create_column_family(&handle.id, opts)
.expect("Could not create column family");
}
}
mod aggregator_ops;
mod map_ops;
mod reducer_ops;
mod value_ops;
mod vec_ops;
#[cfg(test)]
pub mod tests {
use super::*;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
use tempfile::TempDir;
#[derive(Debug)]
pub struct TestDb {
rocks: Arc<Rocks>,
dir: TempDir,
}
impl TestDb {
#[allow(clippy::new_without_default)]
pub fn new() -> TestDb {
let dir = TempDir::new().unwrap();
let mut dir_path = dir.path().to_path_buf();
dir_path.push("rocks");
fs::create_dir(&dir_path).unwrap();
let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap();
TestDb {
rocks: Arc::new(rocks),
dir,
}
}
pub fn checkpoint(&mut self) -> PathBuf {
let mut checkpoint_dir: PathBuf = self.dir.path().into();
checkpoint_dir.push("checkpoint");
self.rocks.checkpoint(&checkpoint_dir).unwrap();
checkpoint_dir
}
pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb {
let dir = TempDir::new().unwrap();
| // TODO: add a warning log here
// warn!(logger, "Checkpoint path {:?} exists, deleting");
fs::remove_dir_all(checkpoint_path)? | random_line_split |
index.ts |
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10) | maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
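// Decode a fetch Response: JSON when the content-type is application/json, text for 201/204 (or when explicitly requested), otherwise the raw body stream.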
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
* apiTokenRefreshTime?: <number> // how many seconds before the token is expiring, it should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
// fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
// clear the api token so that we can enforce a new login;
// usually a 403 means that we couldn't get a new token (trying to get a longer expiry time, for example)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
// set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
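// exponential backoff: minTimeout * factor^attempt, multiplied by a 1-2x jitter when randomize is set, capped at maxTimeout
// with the defaults above that is roughly 1s, 2s, 4s, ... up to 10s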
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
| minTimeout?: number // minimum milliseconds between retries (default: 1000) | random_line_split |
index.ts |
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10)
minTimeout?: number // minimum milliseconds between retries (default: 1000)
maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
* apiTokenRefreshTime?: <number> // how many seconds before the token is expiring, it should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> | if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
// fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
// clear the api token so that we can enforce a new login;
// usually a 403 means that we couldn't get a new token (trying to get a longer expiry time, for example)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
// set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
| {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
| identifier_body |
index.ts |
debug?: boolean // default: false
transport?: {
requestTimeout?: number // milliseconds to wait for rokka server response (default: 30000)
retries?: number // number of retries when API response is 429 (default: 10)
minTimeout?: number // minimum milliseconds between retries (default: 1000)
maxTimeout?: number // maximum milliseconds between retries (default: 10000)
randomize?: boolean // randomize time between retries (default: true)
agent?: any
}
}
interface RequestOptions {
headers?: object
noAuthHeaders?: boolean
fallBackToText?: boolean
form?: boolean
multipart?: boolean
forceUseApiKey?: boolean
noTokenRefresh?: boolean
host?: string
}
const defaults = {
apiHost: 'https://api.rokka.io',
renderHost: 'https://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
* apiTokenRefreshTime?: <number> // how many seconds before the token is expiring, it should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async | (
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
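// auth: prefer a (refreshable) JWT bearer token when a token callback is configured, otherwise fall back to the static Api-Key header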
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
// fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
// clear the api token so that we can enforce a new login;
// usually a 403 means that we couldn't get a new token (trying to get a longer expiry time, for example)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
// set apiTokenExpiry, if not set, to avoid to having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
| request | identifier_name |
index.ts | ://{organization}.rokka.io',
apiVersion: 1,
transport: {
requestTimeout: 30000,
retries: 10,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true,
factor: 2,
debug: false,
},
}
const getResponseBody = async (response: Response, fallbackToText = false) => {
if (response.headers && response.json) {
if (response.headers.get('content-type') === 'application/json') {
return response.json()
}
if (response.status === 204 || response.status === 201 || fallbackToText) {
return response.text()
}
return response.body
}
return response.body
}
interface Request {
method: string
headers: { 'Api-Version'?: string | number; 'Api-Key'?: string }
timeout: number | undefined
retries: number | undefined | any
retryDelay: (attempt: number) => number
form: {}
json: boolean
body: any
agent?: any
}
export interface RequestQueryParams {
[key: string]: string | number | boolean | undefined | null
}
export interface State {
apiKey: string | undefined
apiHost: string
apiVersion: number | string
renderHost: string
transportOptions: any
apiTokenGetCallback?: ApiTokenGetCallback
apiTokenSetCallback?: ApiTokenSetCallback
apiTokenPayload: ApiTokenPayload | null
apiTokenOptions?: RequestQueryParamsNewToken | null
apiTokenRefreshTime: number
request(
method: string,
path: string,
payload?: any | null | undefined,
queryParams?: RequestQueryParams | null,
options?: RequestOptions | undefined | null,
): Promise<RokkaResponseInterface>
}
/**
* Initializing the rokka client.
*
* ```js
* const rokka = require('rokka')({
* apiKey: 'apikey', // required for certain operations
* apiTokenGetCallback?: <() => string> // return JWT token instead of API Key
* apiTokenSetCallback?: <((token: string, payload?: object|null) => void)> // Stores a newly retrieved JWT token
* apiTokenOptions?: <object> // The rokka.user.getNewToken query parameter options, default: {}
* apiTokenRefreshTime?: <number> // how many seconds before the token is expiring, it should be refreshed, default: 3600
* apiHost: '<url>', // default: https://api.rokka.io
* apiVersion: <number>, // default: 1
* renderHost: '<url>', // default: https://{organization}.rokka.io
* debug: true, // default: false
* transport: {
* requestTimeout: <number>, // milliseconds to wait for rokka server response (default: 30000)
* retries: <number>, // number of retries when API response is 429 (default: 10)
* minTimeout: <number>, // minimum milliseconds between retries (default: 1000)
* maxTimeout: <number>, // maximum milliseconds between retries (default: 10000)
* randomize: <boolean> // randomize time between retries (default: true)
* agent?: <any> // an agent to be used with node-fetch, eg. if you need a proxy (default: undefined)
* }
* });
* ```
*
* All properties are optional since certain calls don't require credentials.
*
* If you need to use a proxy, you can do the following
*
* ```js
* import { HttpsProxyAgent } from 'https-proxy-agent'
*
* const rokka = require('rokka')({
* apiKey: 'apikey'
* transport: {agent: new HttpsProxyAgent(proxy)}
* });
* ```
*
* @param {Object} [config={}] configuration properties
* @return {Object}
*
* @module rokka
*/
export default (config: Config = {}): RokkaApi => {
const state: State = {
// config
apiKey: config.apiKey,
apiHost: config.apiHost || defaults.apiHost,
apiTokenGetCallback: config.apiTokenGetCallback || null,
apiTokenSetCallback: config.apiTokenSetCallback || null,
apiTokenPayload: null,
apiTokenOptions: config.apiTokenOptions || {},
apiTokenRefreshTime: config.apiTokenRefreshTime || 3600,
apiVersion: config.apiVersion || defaults.apiVersion,
renderHost: config.renderHost || defaults.renderHost,
transportOptions: Object.assign(defaults.transport, config.transport),
// functions
async request(
method: string,
path: string,
payload: any | null = null,
queryParams: {
[key: string]: string | number | boolean
} | null = null,
options: RequestOptions = {
noAuthHeaders: false,
fallBackToText: false,
forceUseApiKey: false,
noTokenRefresh: false,
host: undefined,
},
): Promise<RokkaResponseInterface> {
let uri = [options.host || state.apiHost, path].join('/')
if (
queryParams &&
!(
Object.entries(queryParams).length === 0 &&
queryParams.constructor === Object
)
) {
uri += '?' + stringify(queryParams)
}
const headers: {
'Api-Version'?: string | number
'Api-Key'?: string
Authorization?: string
} = options.headers || {}
headers['Api-Version'] = state.apiVersion
if (options.noAuthHeaders !== true) {
if (!options.forceUseApiKey && state.apiTokenGetCallback) {
let apiToken = state.apiTokenGetCallback()
// fill apiTokenPayload if it's not set yet; this happens when you load a page, for example
if (!state.apiTokenPayload) {
state.apiTokenPayload = _getTokenPayload(apiToken)
}
// get a new token, when it's somehow almost expired, but should still be valid
const isTokenValid =
apiToken &&
state.apiTokenPayload?.rn === true &&
_tokenValidFor(state.apiTokenPayload?.exp, apiToken) > 0
// if it's not valid, it's also not expiring...
const isTokenExpiring =
isTokenValid &&
_isTokenExpiring(
state.apiTokenPayload?.exp,
apiToken,
state.apiTokenRefreshTime,
)
if (
(!options.noTokenRefresh && isTokenValid && isTokenExpiring) ||
(!isTokenValid && state.apiKey) //or do we have an apiKey
) {
try {
apiToken = (await user(state).user.getNewToken(state.apiKey)).body
.token
} catch (e: any) {
// clear the api token so that we can enforce a new login;
// usually a 403 means that we couldn't get a new token (trying to get a longer expiry time, for example)
if (e && e.statusCode === 403 && state.apiTokenSetCallback) {
state.apiTokenSetCallback('', null)
}
}
}
if (!apiToken) {
const code = 401
throw {
error: {
code,
message: 'No API token (or renewing it did not work correctly)',
},
status: code,
}
}
// set apiTokenExpiry, if not set, to avoid having to decode it all the time
headers['Authorization'] = `Bearer ${apiToken}`
} else {
if (!state.apiKey) {
return Promise.reject(
new Error('Missing required property `apiKey`'),
)
}
headers['Api-Key'] = state.apiKey
}
}
const retryDelay = (attempt: number) => {
// from https://github.com/tim-kos/node-retry/blob/master/lib/retry.js
const random = state.transportOptions.randomize ? Math.random() + 1 : 1
const timeout = Math.round(
random *
state.transportOptions.minTimeout *
Math.pow(state.transportOptions.factor, attempt),
)
return Math.min(timeout, state.transportOptions.maxTimeout)
}
const requestOptions: Request = {
method: method,
headers: headers,
timeout: state.transportOptions.requestTimeout,
retries: state.transportOptions.retries,
retryDelay,
form: {},
json: false,
body: undefined,
agent: state.transportOptions.agent,
}
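// payload encoding: plain FormData fields when options.form, JSON body by default, multipart FormData (file + JSON metadata) when options.multipart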
if (options.form === true) {
const formData = payload || {}
const requestData = new FormData()
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, formData[meta])
})
requestOptions.body = requestData
} else if (options.multipart !== true) {
requestOptions.json = true
requestOptions.body = payload
} else | {
const formData = payload.formData || {}
const requestData = new FormData()
requestData.append(payload.name, payload.contents, payload.filename)
Object.keys(formData).forEach(function (meta) {
requestData.append(meta, JSON.stringify(formData[meta]))
})
requestOptions.body = requestData
} | conditional_block |
|
test-checkpoint.py | "source": [
"# Retrieve the parent divs for all headlines and preview text\n",
"results = soup.find_all(\"div\", class_=\"list_text\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-----------------\n",
"Testing Proves Its Worth With Successful Mars Parachute Deployment\n",
"The giant canopy that helped land Perseverance on Mars was tested here on Earth at NASA’s Wallops Flight Facility in Virginia.\n",
"-----------------\n",
"NASA's Perseverance Rover Gives High-Definition Panoramic View of Landing Site\n",
"A 360-degree panorama taken by the rover’s Mastcam-Z instrument will be discussed during a public video chat this Thursday.\n",
"-----------------\n",
"Nearly 11 Million Names of Earthlings are on Mars Perseverance\n",
"When the Perseverance rover safely touched down on the Martian surface, inside Jezero Crater, on Feb. 18, 2021, it was also a safe landing for the nearly 11 million names on board.\n",
"-----------------\n",
"NASA's Mars Perseverance Rover Provides Front-Row Seat to Landing, First Audio Recording of Red Planet \n",
"The agency’s newest rover captured first-of-its kind footage of its Feb. 18 touchdown and has recorded audio of Martian wind.\n",
"\n",
"\n",
"-----------------\n",
"NASA to Reveal New Video, Images From Mars Perseverance Rover\n",
"First-of-its kind footage from the agency’s newest rover will be presented during a briefing this morning.\n",
"-----------------\n",
"NASA's Mars Helicopter Reports In \n",
"The technology demonstration has phoned home from where it is attached to the belly of NASA’s Perseverance rover. \n",
"-----------------\n",
"NASA's Perseverance Rover Sends Sneak Peek of Mars Landing\n",
"The six-wheeled robot’s latest data since touching down yesterday include a hi-res image captured as the rover’s jetpack lowered it to the ground.\n",
"-----------------\n",
"Touchdown! NASA's Mars Perseverance Rover Safely Lands on Red Planet\n",
"The agency’s latest and most complex mission to the Red Planet has touched down at Jezero Crater. Now it’s time to begin testing the health of the rover. \n",
"-----------------\n",
"Searching for Life in NASA's Perseverance Mars Samples\n",
"When the agency’s newest rover mission searches for fossilized microscopic life on the Red Planet, how will scientists know whether they’ve found it?\n",
"-----------------\n",
"The Mars Relay Network Connects Us to NASA's Martian Explorers\n",
"A tightly choreographed dance between NASA’s Deep Space Network and Mars orbiters will keep the agency’s Perseverance in touch with Earth during landing and beyond.\n",
"-----------------\n",
"NASA's Next Mars Rover Is Ready for the Most Precise Landing Yet\n",
"What to expect when the Mars 2020 Perseverance rover arrives at the Red Planet on Feb. 18, 2021.\n",
"-----------------\n",
"Sensors Prepare to Collect Data as Perseverance Enters Mars' Atmosphere\n",
"Technology will collect critical data about the harsh entry environment during Perseverance’s entry next Thursday.\n",
"-----------------\n",
"InSight Is Meeting the Challenge of Winter on Dusty Mars\n",
"As dust collects on the solar panels and winter comes to Elysium Planitia, the team is following a plan to reduce science operations in order to keep the lander safe.\n",
"-----------------\n",
"NASA Invites Public to Share Thrill of Mars Perseverance Rover Landing\n",
"Mark your calendars for live landing commentary, news briefings, livestreamed Q&As, virtual watch parties, student activities, and more.\n",
"-----------------\n",
"Tricky Terrain: Helping to Assure a Safe Rover Landing\n",
"How two new technologies will help Perseverance, NASA’s most sophisticated rover yet, touch down onto the surface of Mars this month.\n",
"-----------------\n",
"Where Should Future Astronauts Land on Mars? Follow the Water\n",
"A new NASA paper provides the most detailed map to date of near-surface water ice on the Red Planet.\n",
"-----------------\n",
"NASA's Perseverance Pays Off Back Home\n",
"Even as the Perseverance rover approaches Mars, technology on board is paying off on Earth.\n",
"-----------------\n",
"Could the Surface of Phobos Reveal Secrets of the Martian Past?\n",
"The Martian moon Phobos orbits through a stream of charged atoms and molecules that flow off the Red Planet’s atmosphere, new research shows.\n",
"-----------------\n",
"NASA's MAVEN Continues to Advance Mars Science and Telecommunications Relay Efforts\n",
"With a suite of new national and international spacecraft primed to explore the Red Planet after their arrival next month, NASA’s MAVEN mission is ready to provide support and continue its study of the Martian atmosphere.\n",
"-----------------\n",
"NASA's Perseverance Rover 22 Days From Mars Landing\n",
"Seven minutes of harrowing descent to the Red Planet is in the not-so-distant future for the agency’s Mars 2020 mission. \n",
"-----------------\n",
"6 Things to Know About NASA's Mars Helicopter on Its Way to Mars\n",
"Ingenuity, a technology experiment, is preparing to attempt the first powered, controlled flight on the Red Planet.\n",
"-----------------\n",
"NASA to Host Virtual Briefing on February Perseverance Mars Rover Landing\n",
"NASA leadership and members of the mission will discuss the agency’s latest rover, which touches down on the Red Planet on Feb. 18.\n",
"-----------------\n",
"NASA InSight's ‘Mole' Ends Its Journey on Mars\n",
"The heat probe hasn’t been able to gain the friction it needs to dig, but the mission has been granted an extension to carry on with its other science.\n",
"-----------------\n",
"Mars 2020 Perseverance Rover to Capture Sounds From the Red Planet\n",
"Audio gathered by the mission may not sound quite the same on Mars as it would to our ears on Earth. A new interactive online experience lets you sample the difference.\n",
"-----------------\n",
"NASA's Curiosity Rover Reaches Its 3,000th Day on Mars\n",
"As the rover has continued to ascend Mount Sharp, it’s found distinctive benchlike rock formations.\n",
"-----------------\n",
"Celebrate the Perseverance Rover Landing With NASA's Student Challenge\n",
"The rover touches down on the Red Planet next month, and students are invited to join the excitement by designing, building, and landing their own Mars mission. NASA can help.\n",
"-----------------\n",
"NASA Extends Exploration for Two Planetary Science Missions\n",
"The missions – Juno and InSight – have each increased our understanding of our solar system, as well as spurred new sets of diverse questions.\n",
"-----------------\n",
"7 Things to Know About the NASA Rover About to Land on Mars\n",
"The Mars 2020 Perseverance rover, which has started its approach to the Red Planet, will help answer the next logical question in Mars exploration.\n",
"-----------------\n",
"A Martian Roundtrip: NASA's Perseverance Rover Sample Tubes\n",
"Marvels of engineering, the rover's sample tubes must be tough enough to safely bring Red Planet samples on the long journey back to Earth in immaculate condition. \n",
"-----------------\n",
"NASA Moves Forward With Campaign to Return Mars Samples to Earth\n",
"During this next phase, the program will mature critical technologies and make critical design decisions as well as assess industry partnerships.\n",
"-----------------\n",
"3 Things We've Learned From NASA's Mars InSight \n",
"Scientists are finding new mysteries since the geophysics mission landed two years ago.\n",
"-----------------\n",
"From JPL's Mailroom to Mars and Beyond\n",
"Bill Allen has thrived as the mechanical systems design lead for three Mars rover missions, but he got his start as a teenager sorting letters for the NASA center.\n",
"-----------------\n",
"5 Hidden Gems Are Riding Aboard NASA's Perseverance Rover\n",
"The symbols, mottos, and small objects added to the agency's newest Mars rover serve a variety of purposes, from functional to decorative.\n",
"-----------------\n",
"MOXIE Could Help Future Rockets Launch Off Mars\n",
"NASA's Perseverance rover carries a device to convert Martian air into oxygen that, if produced on a larger scale, could be used not | random_line_split |
||
app.component.ts | this.script.src = "https://maps.googleapis.com/maps/api/js?key=AIzaSyDz7iXxtwOMovXzKaaWLzStFo1tDLP5PEg&libraries=places";
this.script.onload = () => {
this.map = new google.maps.Map(document.getElementById('map'), {
center: {lat: -33.8688, lng: 151.2195},
zoom: 13,
mapTypeId: 'roadmap',
gestureHandling: 'greedy'
});
this.input = document.getElementById('pac-input');
this.searchBox = new google.maps.places.SearchBox(this.input);
var me = this;
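// keep the SearchBox biased towards whatever area is currently visible on the map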
this.map.addListener('bounds_changed', function() {
me.searchBox.setBounds(me.map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
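// centre the map on the user's position, drop a "My location" marker and reverse-geocode it for display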
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() | });
me.markers = [];
var bounds = new google.maps.LatLngBounds();
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
}
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info | {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null); | identifier_body |
app.component.ts | this.input = document.getElementById('pac-input');
this.searchBox = new google.maps.places.SearchBox(this.input);
var me = this;
this.map.addListener('bounds_changed', function() {
me.searchBox.setBounds(me.map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
var bounds = new google.maps.LatLngBounds();
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
}
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
}
| onChangeAdditionalFilter | identifier_name |
|
app.component.ts | .map.getBounds());
});
var bounds = new google.maps.LatLngBounds();
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function(position) {
bounds = new google.maps.LatLngBounds(new google.maps.LatLng(position.coords.latitude, position.coords.longitude));
me.map.fitBounds(bounds);
me.currentLat = position.coords.latitude;
me.currentLng = position.coords.longitude;
me.myMarker = new google.maps.Marker({
map: me.map,
title: "My location",
position: new google.maps.LatLng(position.coords.latitude, position.coords.longitude)
});
var latlng = {lat: me.currentLat, lng: me.currentLng};
me.geocoder.geocode({'location': latlng}, function(results, status) {
if (status === 'OK') {
if (results[0]) {
me.myLocationAddress = results[0].formatted_address;
} else {
window.alert('No results found');
}
} else {
window.alert('Geocoder failed due to: ' + status);
}
});
});
} else { }
this.detailService = new google.maps.places.PlacesService(this.map);
this.directionsService = new google.maps.DirectionsService;
this.directionsDisplay = new google.maps.DirectionsRenderer;
this.directionsDisplay.setMap(this.map);
this.geocoder = new google.maps.Geocoder;
this.searchBox.addListener('places_changed', function() {
me.places = me.searchBox.getPlaces();
if (me.places.length == 0) {
return;
}
// Clear out the old markers.
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
// For each place, get the icon, name and location.
bounds = new google.maps.LatLngBounds();
var place = me.places[0];
if (!place.geometry) {
console.log("Returned place contains no geometry");
return;
}
me.currentLat = place.geometry.location.lat();
me.currentLng = place.geometry.location.lng();
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
me.search();
});
}
document.body.appendChild(this.script);
}
search() {
var me = this;
var pyrmont = new google.maps.LatLng(this.currentLat, this.currentLng);
var request = {
location: pyrmont,
radius: this.radius,
type: ['fitness'],
query: this.additionalFilter + ' ' + this.searchFitnessService
};
// this.isLoading = true;
this.searchresults = [];
var service = new google.maps.places.PlacesService(this.map);
var i = 0;
me.markers.forEach(function(marker) {
marker.setMap(null);
});
me.markers = [];
var bounds = new google.maps.LatLngBounds();
service.textSearch(request, function(results, status, pagination) {
if (status == google.maps.places.PlacesServiceStatus.OK) {
results.forEach(function(place) {
i++;
var icon = {
url:"https://cdn.mapmarker.io/api/v1/pin?size=120&background=%230C797D&text=" + i + "&color=%23FFFFFF&voffset=2&hoffset=1&", //"assets/pins/number_" + i + ".png",
size: new google.maps.Size(150, 150),
origin: new google.maps.Point(0, 0),
anchor: new google.maps.Point(17, 75),
scaledSize: new google.maps.Size(50, 50)
};
var newMarker = new google.maps.Marker({
map: me.map,
icon: icon,
title: place.name,
position: place.geometry.location
});
me.markers.push(newMarker);
newMarker.addListener('click', async function() {
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
});
if (place.geometry.viewport) {
// Only geocodes have viewport.
bounds.union(place.geometry.viewport);
} else {
bounds.extend(place.geometry.location);
}
me.map.fitBounds(bounds);
});
me.searchresults = me.searchresults.concat(results);
if(pagination.hasNextPage) {
pagination.nextPage();
}
}
});
}
async select(i) {
this.selectedNo = i;
var place = this.searchresults[i];
var newMarker = this.markers[i];
var me = this;
var img = './assets/images/service.jpg';
if( place.photos && place.photos.length > 0) {
img = place.photos[0].getUrl();
}
let placeDeatil : any = await me.getPlaceDetails(place.place_id);
var open_hours = '';
if(placeDeatil.opening_hours) {
placeDeatil.opening_hours.weekday_text.forEach(t => {
open_hours += t + "<br>";
})
}
// debugger;
var contentString =
`<div class="infowindow">
<div>
<img class="thumb" src="` + img + `">
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/pin.svg">
<div class="info-text">` + placeDeatil.formatted_address +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/open_in_new.svg">
<a class="info-text" target="_blank" href="`+placeDeatil.website+`">` + placeDeatil.website +`</a>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/phone.svg">
<div class="info-text">` + placeDeatil.formatted_phone_number +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/timeline.svg">
<div class="info-text">` + open_hours +`</div>
</div>
<div class="info-item">
<img class="info-icon" src="./assets/images/bookmark.svg">
<div class="info-text">Add to my favorite</div>
</div>
</div>`;
if(me.infowindow) {
me.infowindow.close();
}
me.infowindow = new google.maps.InfoWindow({
content: contentString
});
me.infowindow.open(me.map, newMarker);
}
onChangeAdditionalFilter(value) {
if(value == "no") {return;}
// alert(this.additionalFilter);
this.search();
}
changedRadius(value){ | if(value == "") {return;}
this.search();
} | random_line_split |
|
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("couldn't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
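///
/// A minimal usage sketch, mirroring the loop in `enable_reading` above; the
/// `addr`, `buffer`, `reader` and `sender` bindings are assumed to already exist:
///
/// ```ignore
/// let mut carry = 0;
/// loop {
/// // feed the leftover byte count back in so partial messages can be completed
/// carry = self.read_from_stream(addr, &mut buffer, &mut reader, carry, &sender).await?;
/// }
/// ```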
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
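///
/// A sketch of one possible implementation for a hypothetical length-prefixed
/// format (a little-endian `u32` length header followed by the payload, with
/// `Self::Message = Vec<u8>`); this is an illustration, not part of the crate:
///
/// ```ignore
/// fn read_message(&self, _source: SocketAddr, buffer: &[u8]) -> io::Result<Option<(Self::Message, usize)>> {
/// if buffer.len() < 4 {
/// return Ok(None); // the length header itself is incomplete
/// }
/// let len = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]) as usize;
/// if len == 0 {
/// return Err(io::ErrorKind::InvalidData.into()); // reject zero-length payloads
/// }
/// if buffer.len() < 4 + len {
/// return Ok(None); // the payload is not fully buffered yet
/// }
/// Ok(Some((buffer[4..4 + len].to_vec(), 4 + len)))
/// }
/// ```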
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> |
}
| {
// don't do anything by default
Ok(())
} | identifier_body |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("couldn't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn | (&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
}
| process_message | identifier_name |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("couldn't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message( | buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
} | &self,
source: SocketAddr, | random_line_split |
reading.rs | use crate::{protocols::ReturnableConnection, Pea2Pea};
use async_trait::async_trait;
use tokio::{
io::{AsyncRead, AsyncReadExt},
sync::mpsc,
time::sleep,
};
use tracing::*;
use std::{io, net::SocketAddr, time::Duration};
/// Can be used to specify and enable reading, i.e. receiving inbound messages.
/// If handshaking is enabled too, it goes into force only after the handshake has been concluded.
#[async_trait]
pub trait Reading: Pea2Pea
where
Self: Clone + Send + Sync + 'static,
{
/// The final (deserialized) type of inbound messages.
type Message: Send;
/// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout
/// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid
/// accidentally reading "borked" messages).
fn enable_reading(&self) {
let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>(
self.node().config().protocol_handler_queue_depth,
);
// the main task spawning per-connection tasks reading messages from their streams
let self_clone = self.clone();
let reading_task = tokio::spawn(async move {
trace!(parent: self_clone.node().span(), "spawned the Reading handler task");
loop {
// these objects are sent from `Node::adapt_stream`
if let Some((mut conn, conn_returner)) = conn_receiver.recv().await {
let addr = conn.addr;
let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point
let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size]
.into_boxed_slice();
let (inbound_message_sender, mut inbound_message_receiver) =
mpsc::channel(self_clone.node().config().conn_inbound_queue_depth);
// the task for processing parsed messages
let processing_clone = self_clone.clone();
let inbound_processing_task = tokio::spawn(async move {
let node = processing_clone.node();
trace!(parent: node.span(), "spawned a task for processing messages from {}", addr);
loop {
if let Some(msg) = inbound_message_receiver.recv().await | else {
node.disconnect(addr);
break;
}
}
});
conn.tasks.push(inbound_processing_task);
// the task for reading messages from a stream
let reader_clone = self_clone.clone();
let reader_task = tokio::spawn(async move {
let node = reader_clone.node();
trace!(parent: node.span(), "spawned a task for reading messages from {}", addr);
// postpone reads until the connection is fully established; if the process fails,
// this task gets aborted, so there is no need for a dedicated timeout
while !node.connected_addrs().contains(&addr) {
sleep(Duration::from_millis(5)).await;
}
let mut carry = 0;
loop {
match reader_clone
.read_from_stream(
addr,
&mut buffer,
&mut reader,
carry,
&inbound_message_sender,
)
.await
{
Ok(leftover) => {
carry = leftover;
}
Err(e) => {
node.known_peers().register_failure(addr);
if node.config().fatal_io_errors.contains(&e.kind()) {
node.disconnect(addr);
break;
} else {
sleep(Duration::from_secs(
node.config().invalid_read_delay_secs,
))
.await;
}
}
}
}
});
conn.tasks.push(reader_task);
// return the Connection to the Node, resuming Node::adapt_stream
if conn_returner.send(Ok(conn)).is_err() {
unreachable!("couldn't return a Connection to the Node");
}
} else {
error!("the Reading protocol is down!");
break;
}
}
});
self.node().tasks.lock().push(reading_task);
// register the ReadingHandler with the Node
self.node().set_reading_handler(conn_sender.into());
}
/// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of
/// simplicity for better performance. Read messages are sent to a message processing task in order to enable
/// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they
/// should be provided to the method on the next call as `carry`.
async fn read_from_stream<R: AsyncRead + Unpin + Send>(
&self,
addr: SocketAddr,
buffer: &mut [u8],
reader: &mut R,
carry: usize,
message_sender: &mpsc::Sender<Self::Message>,
) -> io::Result<usize> {
// perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read
match reader.read(&mut buffer[carry..]).await {
Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()),
Ok(n) => {
trace!(parent: self.node().span(), "read {}B from {}", n, addr);
let mut processed = 0;
let mut left = carry + n;
// several messages could have been read at once; process the contents of the buffer
loop {
// try to read a single message from the buffer
match self.read_message(addr, &buffer[processed..processed + left]) {
// a full message was read successfully
Ok(Some((msg, len))) => {
// advance the counters
processed += len;
left -= len;
trace!(
parent: self.node().span(),
"isolated {}B as a message from {}; {}B left to process",
len,
addr,
left
);
self.node()
.known_peers()
.register_received_message(addr, len);
self.node().stats().register_received_message(len);
// send the message for further processing
if message_sender.send(msg).await.is_err() {
error!(parent: self.node().span(), "the inbound message channel is closed");
return Err(io::ErrorKind::BrokenPipe.into());
}
// if the read is exhausted, reset the carry and return
if left == 0 {
return Ok(0);
}
}
// the message in the buffer is incomplete
Ok(None) => {
// forbid messages that are larger than the read buffer
if left >= buffer.len() {
error!(parent: self.node().span(), "a message from {} is too large", addr);
return Err(io::ErrorKind::InvalidData.into());
}
trace!(
parent: self.node().span(),
"a message from {} is incomplete; carrying {}B over",
addr,
left
);
// move the leftover bytes to the beginning of the buffer; the next read will append bytes
// starting from where the leftover ones end, allowing the message to be completed
buffer.copy_within(processed..processed + left, 0);
return Ok(left);
}
// an erroneous message (e.g. an unexpected zero-length payload)
Err(_) => {
error!(parent: self.node().span(), "a message from {} is invalid", addr);
return Err(io::ErrorKind::InvalidData.into());
}
}
}
}
// a stream read error
Err(e) => {
error!(parent: self.node().span(), "can't read from {}: {}", addr, e);
Err(e)
}
}
}
/// Reads a single message from the given buffer; `Ok(None)` indicates that the message is
/// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message.
/// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err`
/// returned here will result in the associated connection being dropped.
fn read_message(
&self,
source: SocketAddr,
buffer: &[u8],
) -> io::Result<Option<(Self::Message, usize)>>;
/// Processes an inbound message. Can be used to update state, send replies etc.
#[allow(unused_variables)]
async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> {
// don't do anything by default
Ok(())
}
}
| {
if let Err(e) = processing_clone.process_message(addr, msg).await {
error!(parent: node.span(), "can't process an inbound message: {}", e);
node.known_peers().register_failure(addr);
}
} | conditional_block |
lib.rs | ` files do
//! not contain a header, the header is a dummy object.
//!
//!
//! ### Adding support for new file types
//!
//! To add new file types, you need to add types that implement the
//! `ReadLhe` and `WriteLhe` traits for the additional information
//! stored in the file type.
//! The type signature of the `read_lhe` function of the `ReadLhe`
//! trait means that you should use [`nom`] to parse your type.
//! Your implementations need to parse the opening and end tags for
//! comments (`<!--` and `-->`) and the header (`<header>` and
//! `</header>`) respectively, but must leave the tags for the init
//! section and for events alone.
//! With these implementations you can then use `LheFileGeneric` with
//! your types to read and write `lhe` files.
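//!
//! A rough sketch of such an implementation for a comment type that simply
//! keeps the comment body as a string; the type name and the parser details
//! are illustrative assumptions, not part of this crate:
//!
//! ```ignore
//! pub struct RawComment(pub String);
//!
//! impl ReadLhe for RawComment {
//! fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Self> {
//! do_parse!(
//! input,
//! ws!(tag!("<!--")) >>
//! body: map!(take_until!("-->"), |b: &[u8]| String::from_utf8_lossy(b).into_owned()) >>
//! tag!("-->") >>
//! (RawComment(body.trim().to_string()))
//! )
//! }
//! }
//!
//! impl WriteLhe for RawComment {
//! fn write_lhe<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
//! writeln!(writer, "<!--{}-->", self.0)
//! }
//! }
//! ```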
//!
//!
//! [`rust`]: https://www.rust-lang.org
//! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017
//! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo
//! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/
//! [`nom`]: https://github.com/Geal/nom
//! [`plain`]: plain/index.html
//! [`string`]: string/index.html
//! [`helac`]: helac/index.html
extern crate lorentz_vector;
#[macro_use]
extern crate nom;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
#[macro_use]
extern crate serde;
#[cfg(test)]
extern crate serde_json;
#[macro_use]
pub mod nom_util;
pub mod generic;
pub mod helac;
pub mod plain;
pub mod string;
use lorentz_vector::LorentzVector;
use std::error;
use std::fmt;
use std::fs;
use std::io;
use std::io::Read;
use std::marker;
use std::path::Path;
#[cfg(test)]
use quickcheck::Arbitrary;
#[cfg(test)]
use quickcheck::Gen;
use nom_util::{parse_f64, parse_i64};
/// A type to use for pdg ids
///
/// See the [Particle Data Group] website for more information.
/// A list of all particle numbers can be found [here].
///
/// [Particle Data Group]: http://pdg.lbl.gov/
/// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf
pub type PdgId = i64;
/// A trait to read (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait ReadLhe
where
Self: marker::Sized,
{
/// Read an lhe object from a byte string
///
/// The input to this function is the remaining input in the file
/// (or just a chunk of it) and if successful, it should return the
/// parsed object and the input left after parsing the object.
/// See the [`nom documentation`] for more information.
///
/// [`nom documentation`]: http://rust.unhandledexpression.com/nom/
fn read_lhe(&[u8]) -> nom::IResult<&[u8], Self>;
/// Read an lhe object from a file
fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> {
let mut file = fs::File::open(path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
Self::read_lhe(&contents)
.to_full_result()
.map_err(ReadError::Nom)
}
}
/// A trait to write (parts of) lhe files
///
/// This trait needs to be implemented for a type to be able to use it
/// in [`LheFileGeneric`] to hold extra information.
///
/// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html
pub trait WriteLhe {
/// Write the object to a writer
fn write_lhe<W: io::Write>(&self, &mut W) -> io::Result<()>;
/// Write the object to a file
fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
let mut file = fs::File::create(path)?;
self.write_lhe(&mut file)
}
}
/// Errors that may occur when reading lhe objects from files
#[derive(Debug)]
pub enum ReadError {
/// An io error occured
Io(io::Error),
/// A parse error occured
Nom(nom::IError),
}
impl From<io::Error> for ReadError {
fn from(err: io::Error) -> ReadError {
ReadError::Io(err)
}
}
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ReadError::Io(ref err) => {
write!(f, "Failed to read the lhe file with an IO error: {}", err)
}
ReadError::Nom(ref err) => write!(
f,
"Failed to read the lhe file with a parse error: {:?}",
err
),
}
}
}
impl error::Error for ReadError {
fn description(&self) -> &str |
fn cause(&self) -> Option<&error::Error> {
match *self {
ReadError::Io(ref err) => Some(err),
ReadError::Nom(_) => None,
}
}
}
/// A struct for process information
///
/// This is the per process information contained in the `init` section
/// of `lhe` files.
/// When reading a file, the `Init` struct will contain `NPRUP`
/// `ProcInfo` objects.
/// `ProcInfo` is part of the compulsory initialization information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process | {
match *self {
ReadError::Io(..) => &"Failed to read the lhe file with an IO error",
ReadError::Nom(..) => &"Failed to read the lhe file with a parse error",
}
} | identifier_body |
lib.rs | hef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process_id
)
}
}
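// A hedged usage sketch, not part of the original crate: `ProcInfo` can be
// written with `WriteLhe` to any `io::Write` target, e.g. an in-memory buffer.
#[cfg(test)]
mod proc_info_write_sketch {
use super::*;
#[test]
fn writes_one_whitespace_separated_line() {
let info = ProcInfo {
xsect: 2.1,
xsect_err: 3.2e-3,
maximum_weight: 1.0,
process_id: 1,
};
let mut out = Vec::new();
info.write_lhe(&mut out).unwrap();
// the line holds xsect, xsect_err, maximum_weight and process_id in order
let line = String::from_utf8(out).unwrap();
assert_eq!(line.split_whitespace().count(), 4);
}
}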
#[cfg(test)]
impl Arbitrary for ProcInfo {
fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo {
ProcInfo {
xsect: Arbitrary::arbitrary(gen),
xsect_err: Arbitrary::arbitrary(gen),
maximum_weight: Arbitrary::arbitrary(gen),
process_id: Arbitrary::arbitrary(gen),
}
}
}
/// A particle in lhe format
///
/// An event will contain as many `Particle`s as there are particles in
/// the event.
/// `Particle` is part of the compulsory event information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{Particle, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 1
/// 2.1 3.2E-03 1.0E+00 1
/// </init>
/// <event>
/// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02
/// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// </event>
/// </LesHouchesEvents>";
///
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// let event = &lhe.events[0];
/// assert_eq!(event.particles.len(), 4);
/// assert_eq!(event.particles[0].pdg_id, -11);
/// assert_eq!(event.particles[3].momentum.py, 452.);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct Particle {
/// The pdg id of the particle (`IDUP`)
pub pdg_id: PdgId,
/// The status code of the particle (`ISTUP`)
pub status: i64,
/// The id of the first mother of the particle (`MOTHUP(1)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_1_id: i64,
/// The id of the second mother of the particle (`MOTHUP(2)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_2_id: i64,
/// The color of the particle (`ICOLUP(1)`)
pub color_1: i64,
/// The color of the particle (`ICOLUP(2)`)
pub color_2: i64,
/// The four momentum of the particle (`PUP` 1 - 4)
pub momentum: LorentzVector,
/// The mass of the particle (`PUP(5)`)
pub mass: f64,
/// The proper lifetime of the particle (`VTIMUP`)
pub proper_lifetime: f64,
/// The spin of the particle (`SPINUP`)
pub spin: f64,
}
impl ReadLhe for Particle {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> {
do_parse!(
input,
pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64)
>> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64)
>> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64)
>> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64)
>> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64)
>> (Particle {
pdg_id,
status,
mother_1_id,
mother_2_id,
color_1,
color_2,
momentum: LorentzVector { e, px, py, pz },
mass,
proper_lifetime,
spin,
})
)
}
}
impl WriteLhe for Particle {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}",
self.pdg_id,
self.status,
self.mother_1_id, | self.mother_2_id, | random_line_split |
|
lib.rs | ::Formatter) -> fmt::Result {
match *self {
ReadError::Io(ref err) => {
write!(f, "Failed to read the lhe file with an IO error: {}", err)
}
ReadError::Nom(ref err) => write!(
f,
"Failed to read the lhe file with a parse error: {:?}",
err
),
}
}
}
impl error::Error for ReadError {
fn description(&self) -> &str {
match *self {
ReadError::Io(..) => &"Failed to read the lhe file with an IO error",
ReadError::Nom(..) => &"Failed to read the lhe file with a parse error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ReadError::Io(ref err) => Some(err),
ReadError::Nom(_) => None,
}
}
}
/// A struct for process information
///
/// This is the per process information contained in the `init` section
/// of `lhe` files.
/// When reading a file, the `Init` struct will contain `NPRUP`
/// `ProcInfo` objects.
/// `ProcInfo` is part of the compulsory initialization information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{ProcInfo, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 2
/// 2.1 3.2E-03 1.0E+00 1
/// 4.0 7.4E-03 1.0E+00 2
/// </init>
/// </LesHouchesEvents>";
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// assert_eq!(lhe.init.process_info.len(), 2);
/// assert_eq!(lhe.init.process_info[0].xsect, 2.1);
/// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct ProcInfo {
/// The cross section of the process (`XSECUP`)
pub xsect: f64,
/// The cross section error of the process (`XERRUP`)
pub xsect_err: f64,
/// The maximum weight of the events of the process (`XMAXUP`)
pub maximum_weight: f64,
/// The process id (`LPRUP`)
pub process_id: i64,
}
impl ReadLhe for ProcInfo {
fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> {
do_parse!(
input,
xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64)
>> process_id: ws!(parse_i64) >> (ProcInfo {
xsect,
xsect_err,
maximum_weight,
process_id,
})
)
}
}
impl WriteLhe for ProcInfo {
fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writeln!(
writer,
"{:e} {:e} {:e} {}",
self.xsect, self.xsect_err, self.maximum_weight, self.process_id
)
}
}
#[cfg(test)]
impl Arbitrary for ProcInfo {
fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo {
ProcInfo {
xsect: Arbitrary::arbitrary(gen),
xsect_err: Arbitrary::arbitrary(gen),
maximum_weight: Arbitrary::arbitrary(gen),
process_id: Arbitrary::arbitrary(gen),
}
}
}
/// A particle in lhe format
///
/// An event will contain as many `Particle`s as there are particles in
/// the event.
/// `Particle` is part of the compulsory event information.
///
/// For more information on the fields, see the [`lhe`] paper and the
/// documentation of the [`LHA common blocks`].
/// The names in parentheses are the names of the fields in these
/// papers.
///
/// # Examples
///
/// ```rust
/// use lhef::{Particle, ReadLhe};
/// use lhef::plain::LheFile;
///
/// let bytes = b"\
/// <LesHouchesEvents version=\"1.0\">
/// <init>
/// 2212 2212 6500 6500 0 0 13100 13100 3 1
/// 2.1 3.2E-03 1.0E+00 1
/// </init>
/// <event>
/// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02
/// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00
/// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00
/// </event>
/// </LesHouchesEvents>";
///
/// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap();
/// let event = &lhe.events[0];
/// assert_eq!(event.particles.len(), 4);
/// assert_eq!(event.particles[0].pdg_id, -11);
/// assert_eq!(event.particles[3].momentum.py, 452.);
/// ```
///
/// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017
/// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(test, derive(Serialize, Deserialize))]
pub struct Particle {
/// The pdg id of the particle (`IDUP`)
pub pdg_id: PdgId,
/// The status code of the particle (`ISTUP`)
pub status: i64,
/// The id of the first mother of the particle (`MOTHUP(1)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_1_id: i64,
/// The id of the second mother of the particle (`MOTHUP(2)`).
/// This isn't a pdg id, but a (1 based) index into the particles vector.
pub mother_2_id: i64,
/// The color of the particle (`ICOLUP(1)`)
pub color_1: i64,
/// The color of the particle (`ICOLUP(2)`)
pub color_2: i64,
/// The four momentum of the particle (`PUP` 1 - 4)
pub momentum: LorentzVector,
/// The mass of the particle (`PUP(5)`)
pub mass: f64,
/// The proper lifetime of the particle (`VTIMUP`)
pub proper_lifetime: f64,
/// The spin of the particle (`SPINUP`)
pub spin: f64,
}
impl ReadLhe for Particle {
fn | read_lhe | identifier_name |
|
TEST.py | _list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName | # else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph() # create a separate graph for each class instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
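# the graph output is already clipped to [0, 1] and scaled to 0..255; round to integers for image output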
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
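# dispatch to the model trained for the QP range that contains the requested QP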
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
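# evaluate every checkpoint: run the network NUM_CNN times per image and track PSNR against the ground truth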
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur | _list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY) | conditional_block |
TEST.py | _list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph() # create a separate graph for each class instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Pre | n17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur | dict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cn | identifier_body |
TEST.py | _file_list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None | self.graph = tf.Graph() # 为每个类(实例)单独创建一个graph
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------")
if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur_psn | model = None
def __init__(self, model, modelpath): | random_line_split |
TEST.py | _list(fileOrDir)
for path in fileName_list:
# w, h = getWH(path)
# imgY = getYdata(path, [w, h])
imgY = c_getYdata(path)
imgY = normalize(imgY)
imgY = np.resize(imgY, (1, imgY.shape[0], imgY.shape[1], 1))
original_ycbcr.append([imgY, imgCbCr])
##The input is two directories, including ground truth.
elif len(fileOrDir) == 2:
fileName_list = load_file_list(fileOrDir[0])
test_list = get_train_list(load_file_list(fileOrDir[0]), load_file_list(fileOrDir[1]))
for pair in test_list:
filesize = os.path.getsize(pair[0])
picsize = getWH(pair[0])[0]*getWH(pair[0])[0] * 3 // 2
numFrames = filesize // picsize
# if numFrames ==1:
or_imgY = c_getYdata(pair[0])
gt_imgY = c_getYdata(pair[1])
# normalize
or_imgY = normalize(or_imgY)
or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
## act as a placeholder
or_imgCbCr = 0
original_ycbcr.append([or_imgY, or_imgCbCr])
gt_y.append(gt_imgY)
# else:
# while numFrames>0:
# or_imgY =getOneFrameY(pair[0])
# gt_imgY =getOneFrameY(pair[1])
# # normalize
# or_imgY = normalize(or_imgY)
#
# or_imgY = np.resize(or_imgY, (1, or_imgY.shape[0], or_imgY.shape[1], 1))
# gt_imgY = np.resize(gt_imgY, (1, gt_imgY.shape[0], gt_imgY.shape[1], 1))
#
# ## act as a placeholder
# or_imgCbCr = 0
# original_ycbcr.append([or_imgY, or_imgCbCr])
# gt_y.append(gt_imgY)
else:
print("Invalid Inputs.")
exit(0)
return original_ycbcr, gt_y, fileName_list
class Predict:
input_tensor = None
output_tensor = None
model = None
def __init__(self, model, modelpath):
self.graph = tf.Graph() # create a separate graph for each class instance
self.model = model
with self.graph.as_default():
self.input_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 1))
#self.output_tensor = tf.make_template('input_scope', self.model)(self.input_tensor)
self.output_tensor = model(self.input_tensor)
self.output_tensor = tf.clip_by_value(self.output_tensor, 0., 1.)
self.output_tensor = tf.multiply(self.output_tensor, 255)
self.saver = tf.train.Saver()
self.sess = tf.Session(graph=self.graph,config=config) # create a new session
with self.sess.as_default():
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.saver.restore(self.sess, modelpath) # restore parameters from the checkpoint
print(modelpath)
def predict(self, fileOrDir):
#print("------------") | if (isinstance(fileOrDir, str)):
original_ycbcr, gt_y, fileName_list = prepare_test_data(fileOrDir)
imgY = original_ycbcr[0][0]
elif type(fileOrDir) is np.ndarray:
imgY = fileOrDir
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
elif (isinstance(fileOrDir, list)):
fileOrDir = np.asarray(fileOrDir, dtype='float32')
# fileOrDir = fileOrDir / 255
imgY = normalize(np.reshape(fileOrDir, (1, len(fileOrDir), len(fileOrDir[0]), 1)))
else:
imgY=None
with self.sess.as_default():
with self.sess.graph.as_default():
out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: imgY})
out = np.reshape(out, (out.shape[1], out.shape[2]))
out = np.around(out)
out = out.astype('int')
out = out.tolist()
return out
def init(sliceType, QP):
# print("init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
global cnn17,cnn27,cnn37,cnn47,cnn7,cnn57
cnn7 =Predict(model,model_set["CNN_I_QP7"])
cnn17=Predict(model,model_set["CNN_I_QP17"])
cnn27=Predict(model,model_set["CNN_I_QP27"])
cnn37=Predict(model,model_set["CNN2_I_QP37"])
cnn47=Predict(model,model_set["CNN_I_QP47"])
cnn57 = Predict(model, model_set["CNN_I_QP57"])
def predict(file, QP, frame_type):
global cnn17, cnn27, cnn37, cnn47, cnn7, cnn57
if QP < 17:
R = cnn7.predict(file)
elif 17 <= QP < 27:
R = cnn17.predict(file)
elif 27 <= QP < 37:
R = cnn27.predict(file)
elif 37 <= QP < 47:
R = cnn37.predict(file)
elif 47 <= QP < 57:
R = cnn47.predict(file)
else:
R = cnn57.predict(file)
def showImg(inp):
h, w = inp[0], inp[1]
tem = np.asarray(inp, dtype='uint8')
#np.save(r"H:\KONG\cnn_2K%f" % time.time(),tem)
tem = Image.fromarray(tem, 'L')
tem.show()
#tem.save("D:/rec/FromPython%f.jpg" % time.time())
def test_all_ckpt(modelPath):
low_img = r"test_set\qp37"
# low_img = r"H:\KONG\FRAME_!\AV!_deblock_nocdefLr\QP53"
heigh_img = r"test_set\ori"
NUM_CNN=3 # number of CNN passes
original_ycbcr, gt_y, fileName_list = prepare_test_data([low_img,heigh_img])
total_imgs = len(fileName_list)
tem = [f for f in os.listdir(modelPath) if 'data' in f]
ckptFiles = sorted([r.split('.data')[0] for r in tem])
max_psnr=0
max_epoch=0
max_ckpt_psnr = 0
for ckpt in ckptFiles:
cur_ckpt_psnr=0
#epoch = int(ckpt.split('_')[3])
# print(ckpt.split('.')[0].split('_'))
epoch = int(ckpt.split('.')[0].split('_')[-2])
# loss =int(ckpt.split('.')[0].split('_')[-1])
#
# if epoch <1000:
# continue
#print(epoch)
print(os.path.join(modelPath, ckpt))
predictor = Predict(model, os.path.join(modelPath, ckpt))
img_index = [14, 17, 4, 2, 7, 10, 12, 3, 0, 13, 16, 5, 6, 1, 15, 8, 9, 11]
for i in img_index:
# if i>5:
# continue
imgY = original_ycbcr[i][0]
gtY = gt_y[i] if gt_y else 0
#showImg(rec)
#print(np.shape(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])))
#cur_psnr[cnnTime]=psnr(denormalize(np.reshape(imgY, [np.shape(imgY)[1],np.shape(imgY)[2]])),np.reshape(gtY, [np.shape(imgY)[1],np.shape(imgY)[2]]))
cur_psnr=[]
rec = predictor.predict(imgY)
cur_psnr.append(psnr(rec,np.reshape(gtY, np.shape(rec))))
for cc in range(2,NUM_CNN+1):
rec = predictor.predict(rec)
cur_psnr.append(psnr(rec, np.reshape(gtY, np.shape(rec))))
# print(cur_psnr)
#print(len(cur | identifier_name |
|
lib.rs | Err {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
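// byte 0: roll_type, byte 1: threshold, bytes 2..6: bet_amount as a little-endian u32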
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type;
msg!("Roll Type: {}", roll_type);
let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold;
msg!("Threshold: {}", threshold);
let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount;
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
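// expected account order: prize pool, player balance, rent sysvar, player (signer)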
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The check account must be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Check account not owned by program");
return Err(DiceErr::IncorrectOwner.into());
}
// The account must be rent exempt, i.e. live forever
let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Rent system account is not rent system account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Voter fraud! not the correct balance_account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for Debug
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the validity of the bet amount
if bet_amount > player_balance.balance |
if bet_amount == 0 {
msg!("Inavalid Bet");
return Err(DiceErr::InvalidBet.into());
}
let lucky_number:u8 = 20;
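// the roll is hard-coded to 20 here; a winning bet pays 2x when the roll is <= 25 or >= 75, otherwise 1x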
println!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: Simply vote for Bet
//++++++++++++++++++++++++++++++++++++
let program_id = Pubkey::new_unique();
let mut program_test =
ProgramTest::new("dice", program_id, processor!(process_instruction));
// mock contract data account
let game_key = Pubkey::new_unique();
let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
println!("Prize Pool {:?}", data);
program_test.add_account(
game_key,
Account {
lamports: | {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
} | conditional_block |
lib.rs | DiceErr {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type;
msg!("Roll Type: {}", roll_type);
let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold;
msg!("Threshold: {}", threshold);
let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount;
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The check account must be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Check account not owned by program");
return Err(DiceErr::IncorrectOwner.into()); | let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Rent system account is not rent system account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Voter fraud! not the correct balance_account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for Debug
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the validity of the bet amount
if bet_amount > player_balance.balance {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
}
if bet_amount == 0 {
msg!("Inavalid Bet");
return Err(DiceErr::InvalidBet.into());
}
let lucky_number:u8 = 20;
println!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: Simply vote for Bet
//++++++++++++++++++++++++++++++++++++
let program_id = Pubkey::new_unique();
let mut program_test =
ProgramTest::new("dice", program_id, processor!(process_instruction));
// mock contract data account
let game_key = Pubkey::new_unique();
let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
println!("Prize Pool {:?}", data);
program_test.add_account(
game_key,
Account {
lamports: 6 | }
// The account must be rent exempt, i.e. live forever | random_line_split |
lib.rs | {
#[error("Unexpected Roll Mode")]
UnexpectedRollMode,
#[error("Incrrect Threshold")]
IncorrectThreshold,
#[error("Incorrect Owner")]
IncorrectOwner,
#[error("Account Not Rent Exempt")]
AccountNotRentExempt,
#[error("Account Not Balance Account")]
AccountNotBalanceAccount,
#[error("Not Enough Balance")]
NotEnoughBalance,
#[error("Invalid Bet")]
InvalidBet,
}
impl From<DiceErr> for ProgramError {
fn from(e: DiceErr) -> Self {
ProgramError::Custom(e as u32)
}
}
impl<T> DecodeError<T> for DiceErr {
fn type_of() -> &'static str {
"Dice Error"
}
}
// Instruction data
pub struct Dice {
pub roll_type: u8,
pub threshold: u8,
pub bet_amount: u32,
}
impl Sealed for Dice {}
impl Pack for Dice {
const LEN: usize = 6;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let roll_type = src[0];
//println!("Roll Type: {}", roll_type);
if roll_type != 1 && roll_type != 2 {
msg!("You should roll under (1) or Roll Over (2)");
return Err(DiceErr::UnexpectedRollMode.into());
}
let threshold = src[1];
//println!("Threshold: {}", threshold);
if threshold < 2 || threshold > 98 {
msg!("Your guess has to in between 2 and 98");
return Err(DiceErr::IncorrectThreshold.into());
}
let bet_amount = LittleEndian::read_u32(&src[2..6]);
//println!("Bet: {}", bet_amount);
Ok(Dice { roll_type, threshold, bet_amount})
}
fn pack_into_slice(&self, _dst: &mut [u8]) {}
}
// Player's Balance structure, which is one 4 byte u32 number
pub struct PlayerBalance {
pub balance: u32,
}
impl Sealed for PlayerBalance {}
impl Pack for PlayerBalance {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PlayerBalance {
balance: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.balance);
}
}
// Prize Pool structure, which is a 4 byte u32 number
pub struct PrizePool {
pub pool_amount: u32,
}
impl Sealed for PrizePool {}
impl Pack for PrizePool {
const LEN: usize = 4;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
Ok(PrizePool {
pool_amount: LittleEndian::read_u32(&src[0..4]),
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
LittleEndian::write_u32(&mut dst[0..4], self.pool_amount);
}
}
// Declare and export the program's entrypoint
entrypoint!(process_instruction);
// Program entrypoint's implementation
fn process_instruction(
program_id: &Pubkey, // Public key of program account
accounts: &[AccountInfo], // data accounts
instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount
) -> ProgramResult {
msg!("Rust program entrypoint");
// get Dice information
let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type;
msg!("Roll Type: {}", roll_type);
let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold;
msg!("Threshold: {}", threshold);
let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount;
msg!("Bet: {}", bet_amount);
// Iterating accounts is safer than indexing
let accounts_iter = &mut accounts.iter();
// Get the account that holds the Prize Pool
let prize_pool_account = next_account_info(accounts_iter)?;
// The account must be owned by the program in order to modify its data
if prize_pool_account.owner != program_id {
msg!(
"Prize Pool account ({}) not owned by program, actual: {}, expected: {}",
prize_pool_account.key,
prize_pool_account.owner,
program_id
);
return Err(DiceErr::IncorrectOwner.into());
}
// Get the account that holds the balance for the players
let player_balance_account = next_account_info(accounts_iter)?;
// The check account must be owned by the program in order to modify its data
if player_balance_account.owner != program_id {
msg!("Check account not owned by program");
return Err(DiceErr::IncorrectOwner.into());
}
// The account must be rent exempt, i.e. live forever
let sysvar_account = next_account_info(accounts_iter)?;
let rent = &Rent::from_account_info(sysvar_account)?;
if !sysvar::rent::check_id(sysvar_account.key) {
msg!("Rent system account is not rent system account");
return Err(ProgramError::InvalidAccountData);
}
if !rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) {
msg!("Balance account is not rent exempt");
return Err(DiceErr::AccountNotRentExempt.into());
}
// the player
let player_account = next_account_info(accounts_iter)?;
if !player_account.is_signer {
msg!("Player account is not signer");
return Err(ProgramError::MissingRequiredSignature);
}
let expected_check_account_pubkey =
Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?;
if expected_check_account_pubkey != *player_balance_account.key {
msg!("Voter fraud! not the correct balance_account");
return Err(DiceErr::AccountNotBalanceAccount.into());
}
let mut balance_data = player_balance_account.try_borrow_mut_data()?;
// this unpack reads and deserialises the account data and also checks the data is the correct length
let mut player_balance =
PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance");
// Handle the bet_amount and the balance
/*if vote_check.voted_for != 0 {
msg!("Voter fraud! You already voted");
return Err(VoteError::AlreadyVoted.into());
}*/
let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?;
let mut prize_pool =
PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool");
///////////////////////
// Just for Debug
if player_balance.balance == 0 {
msg!{"Airdrop some money!!!"};
player_balance.balance = 50;
}
if prize_pool.pool_amount == 0 {
msg!{"Airdrop some money!!!"};
prize_pool.pool_amount = 1000;
}
// Check the validity of the bet amount
if bet_amount > player_balance.balance {
msg!("Not Enough Balance");
return Err(DiceErr::NotEnoughBalance.into());
}
if bet_amount == 0 {
msg!("Inavalid Bet");
return Err(DiceErr::InvalidBet.into());
}
let lucky_number:u8 = 20;
println!("Result {}", lucky_number);
let mut win_amount:u32 = 0;
if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) {
if lucky_number <= 25 || lucky_number >= 75 {
win_amount = bet_amount as u32 * 2;
msg!("Win: {}", win_amount);
}else{
win_amount = bet_amount as u32;
msg!("Win: {}", win_amount);
}
}
if win_amount == 0 {
prize_pool.pool_amount += bet_amount;
player_balance.balance -= bet_amount;
msg!("You Lose!");
}else{
prize_pool.pool_amount -= win_amount;
player_balance.balance += win_amount;
msg!("You Win!");
}
PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool");
PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance");
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use solana_program::instruction::InstructionError::Custom;
use solana_program::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
sysvar,
};
use solana_program_test::*;
use solana_sdk::transaction::TransactionError;
use solana_sdk::{
account::Account,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::mem;
use self::tokio;
impl From<DiceErr> for TransactionError {
fn from(e: DiceErr) -> Self {
TransactionError::InstructionError(0, Custom(e as u32))
}
}
#[tokio::test]
async fn test_sanity1() {
//++++++++++++++++++++++++++++++++++++
// TEST: Simply vote for Bet
// | DiceErr | identifier_name |