file_name | prefix | suffix | middle | fim_type
---|---|---|---|---
large_string, lengths 4 to 140 | large_string, lengths 0 to 12.1k | large_string, lengths 0 to 12k | large_string, lengths 0 to 7.51k | large_string, 4 classes
leaderboard.go
// ... runs against each other
Timing TimingMethod
// the chosen variables (keys) and values (values) for the leaderboard, both
// given as their respective IDs
Values map[string]string
// the runs, sorted from best to worst
Runs []RankedRun
// API links to related resources
Links []Link
// do not use this field directly, use the available methods
PlatformsData interface{} `json:"platforms"`
// do not use this field directly, use the available methods
RegionsData interface{} `json:"regions"`
// do not use this field directly, use the available methods
GameData interface{} `json:"game"`
// do not use this field directly, use the available methods
CategoryData interface{} `json:"category"`
// do not use this field directly, use the available methods
LevelData interface{} `json:"level"`
// do not use this field directly, use the available methods
PlayersData interface{} `json:"players"`
// do not use this field directly, use the available methods
VariablesData interface{} `json:"variables"`
}
// RankedRun is a run with an assigned rank. As the rank only makes sense when
// a specific ruleset (video-only? realtime or ingame time? etc.) is applied,
// normal runs do not have a rank; only those in leaderboards have.
type RankedRun struct {
// the embedded run
Run Run
// the rank, starting at 1
Rank int
}
// leaderboardResponse models the actual API response from the server
type leaderboardResponse struct {
// the one leaderboard contained in the response
Data Leaderboard
}
// FullGameLeaderboard retrieves the leaderboard for a specific game and one of
// its full-game categories. An error is returned if no category is given or if
// a per-level category is given. If no game is given, it is fetched automatically,
// but if you have it already at hand, you can save one request by specifying it.
func FullGameLeaderboard(game *Game, cat *Category, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if cat.Type != "per-game" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a full-game category."}
}
if game == nil {
var err *Error
game, err = cat.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/category/" + cat.ID, options, nil, nil, embeds})
}
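// Illustrative usage (not part of the original source): fetch the top ten of a
// full-game category and hand back the ranked runs. The empty embed string and
// the helper name are assumptions; passing nil for game lets the library
// resolve it via cat.Game("").
func exampleFullGameTopTen(cat *Category) ([]RankedRun, *Error) {
	lb, err := FullGameLeaderboard(nil, cat, &LeaderboardOptions{Top: 10}, "")
	if err != nil {
		return nil, err
	}
	// lb.Runs is already sorted from best to worst; ranks start at 1
	return lb.Runs, nil
}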
// LevelLeaderboard retrieves the leaderboard for a specific game and one of
// its levels in a specific category. An error is returned if no category or
// level is given or if a full-game category is given. If no game is given, it
// is fetched automatically, but if you have it already at hand, you can save
// one request by specifying it.
func LevelLeaderboard(game *Game, cat *Category, level *Level, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if level == nil {
return nil, &Error{"", "", ErrorBadLogic, "No level given."}
}
if cat.Type != "per-level" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a individual-level category."}
}
if game == nil {
var err *Error
game, err = level.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/level/" + level.ID + "/" + cat.ID, options, nil, nil, embeds})
}
// Game returns the game that the leaderboard is for. If it was not embedded, it
// is fetched from the network. Except for broken data on speedrun.com, this
// should never return nil.
func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {
// we only have the game ID at hand
asserted, okay := lb.GameData.(string)
if okay {
return GameByID(asserted, embeds)
}
return toGame(lb.GameData, true), nil
}
// Category returns the category that the leaderboard is for. If it was not
// embedded, it is fetched from the network. Except for broken data on
// speedrun.com, this should never return nil.
func (lb *Leaderboard) Category(embeds string) (*Category, *Error) {
// we only have the category ID at hand
asserted, okay := lb.CategoryData.(string)
if okay {
return CategoryByID(asserted, embeds)
}
return toCategory(lb.CategoryData, true), nil
}
// Level returns the level that the leaderboard is for. If it's a full-game
// leaderboard, nil is returned. If the level was not embedded, it is fetched
// from the network.
func (lb *Leaderboard) Level(embeds string) (*Level, *Error) {
if lb.LevelData == nil {
return nil, nil
}
// we only have the level ID at hand
asserted, okay := lb.LevelData.(string)
if okay {
return LevelByID(asserted, embeds)
}
return toLevel(lb.LevelData, true), nil
}
// Platforms returns a list of all platforms that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Platforms() *PlatformCollection {
return toPlatformCollection(lb.PlatformsData)
}
// Regions returns a list of all regions that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Regions() *RegionCollection {
return toRegionCollection(lb.RegionsData)
}
// Variables returns a list of all variables that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Variables() *VariableCollection {
return toVariableCollection(lb.VariablesData)
}
// Players returns a list of all players that are present in the leaderboard.
// If they have not been embedded, an empty slice is returned.
func (lb *Leaderboard) Players() *PlayerCollection {
return toPlayerCollection(lb.PlayersData)
}
// for the 'hasLinks' interface
func (lb *Leaderboard) links() []Link {
return lb.Links
}
// LeaderboardOptions are the options that can be used to further narrow down a
// leaderboard to only a subset of runs.
type LeaderboardOptions struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// The platform ID to restrict the leaderboard to.
Platform string
// The region ID to restrict the leaderboard to.
Region string
// When set, can control if all or no runs are done on emulators.
Emulators OptionalFlag
// When set, can control if all or no runs are required to have a video.
VideoOnly OptionalFlag
// the timing method that should be used to compare runs; not all are
// allowed for all games, a server-side error will be returned if an invalid
// choice was made.
Timing TimingMethod
// ISO 8601 date; when given, only runs done before this date will be considered
Date string
// map of variable IDs to value IDs
Values map[string]string
}
// applyToURL merges the filter into a URL.
func (lo *LeaderboardOptions) applyToURL(u *url.URL) {
if lo == nil {
return
}
values := u.Query()
if lo.Top > 0 {
values.Set("top", strconv.Itoa(lo.Top))
}
if len(lo.Platform) > 0 {
values.Set("platform", lo.Platform)
}
if len(lo.Region) > 0 {
values.Set("region", lo.Region)
}
if len(lo.Timing) > 0 {
values.Set("timing", string(lo.Timing))
}
if len(lo.Date) > 0 {
values.Set("date", lo.Date)
}
lo.Emulators.applyToQuery("emulators", &values)
lo.VideoOnly.applyToQuery("video-only", &values)
for varID, valueID := range lo.Values {
values.Set("var-"+varID, valueID)
}
u.RawQuery = values.Encode()
}
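// Sketch (not in the original source) of the query string applyToURL produces
// for a concrete set of options; the IDs and the path are made up, and unset
// OptionalFlag fields are assumed to contribute no parameters.
func exampleLeaderboardQuery() string {
	opts := &LeaderboardOptions{
		Top:      5,
		Platform: "platf01",
		Values:   map[string]string{"var01": "val01"},
	}
	u, _ := url.Parse("/leaderboards/xyz/category/abc")
	opts.applyToURL(u)
	// url.Values.Encode sorts by key, so this yields
	// "platform=platf01&top=5&var-var01=val01"
	return u.RawQuery
}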
// LeaderboardFilter represents the possible filtering options when fetching a
// list of leaderboards.
type LeaderboardFilter struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// If set, can be used to skip returning empty leaderboards.
SkipEmpty OptionalFlag
}
// applyToURL merges the filter into a URL.
leaderboard.go
// ... against each other
Timing TimingMethod
// the chosen variables (keys) and values (values) for the leaderboard, both
// given as their respective IDs
Values map[string]string
// the runs, sorted from best to worst
Runs []RankedRun
// API links to related resources
Links []Link
// do not use this field directly, use the available methods
PlatformsData interface{} `json:"platforms"`
// do not use this field directly, use the available methods
RegionsData interface{} `json:"regions"`
// do not use this field directly, use the available methods
GameData interface{} `json:"game"`
// do not use this field directly, use the available methods
CategoryData interface{} `json:"category"`
// do not use this field directly, use the available methods
LevelData interface{} `json:"level"`
// do not use this field directly, use the available methods
PlayersData interface{} `json:"players"`
// do not use this field directly, use the available methods
VariablesData interface{} `json:"variables"`
}
// RankedRun is a run with an assigned rank. As the rank only makes sense when
// a specific ruleset (video-only? realtime or ingame time? etc.) is applied,
// normal runs do not have a rank; only those in leaderboards have.
type RankedRun struct {
// the embedded run
Run Run
// the rank, starting at 1
Rank int
}
// leaderboardResponse models the actual API response from the server
type leaderboardResponse struct {
// the one leaderboard contained in the response
Data Leaderboard
}
// FullGameLeaderboard retrieves the leaderboard for a specific game and one of
// its full-game categories. An error is returned if no category is given or if
// a per-level category is given. If no game is given, it is fetched automatically,
// but if you have it already at hand, you can save one request by specifying it.
func FullGameLeaderboard(game *Game, cat *Category, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if cat.Type != "per-game" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a full-game category."}
}
if game == nil {
var err *Error
game, err = cat.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/category/" + cat.ID, options, nil, nil, embeds})
}
// LevelLeaderboard retrieves the leaderboard for a specific game and one of
// its levels in a specific category. An error is returned if no category or
// level is given or if a full-game category is given. If no game is given, it
// is fetched automatically, but if you have it already at hand, you can save
// one request by specifying it.
func LevelLeaderboard(game *Game, cat *Category, level *Level, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if level == nil {
return nil, &Error{"", "", ErrorBadLogic, "No level given."}
}
if cat.Type != "per-level" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a individual-level category."}
}
if game == nil {
var err *Error
game, err = level.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/level/" + level.ID + "/" + cat.ID, options, nil, nil, embeds})
}
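// Illustrative (not part of the original source): both calls below return the
// same leaderboard; passing the game you already hold just saves the extra
// lookup that would otherwise happen via level.Game("").
func exampleLevelLeaderboard(game *Game, cat *Category, level *Level) {
	lbAuto, _ := LevelLeaderboard(nil, cat, level, nil, "")  // game resolved automatically
	lbFast, _ := LevelLeaderboard(game, cat, level, nil, "") // one request saved
	_, _ = lbAuto, lbFast
}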
// Game returns the game that the leaderboard is for. If it was not embedded, it
// is fetched from the network. Except for broken data on speedrun.com, this
// should never return nil.
func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {
// we only have the game ID at hand
asserted, okay := lb.GameData.(string)
if okay {
return GameByID(asserted, embeds)
}
return toGame(lb.GameData, true), nil
}
// Category returns the category that the leaderboard is for. If it was not
// embedded, it is fetched from the network. Except for broken data on
// speedrun.com, this should never return nil.
func (lb *Leaderboard) Category(embeds string) (*Category, *Error) {
// we only have the category ID at hand
asserted, okay := lb.CategoryData.(string)
if okay {
return CategoryByID(asserted, embeds)
}
return toCategory(lb.CategoryData, true), nil
}
// Level returns the level that the leaderboard is for. If it's a full-game
// leaderboard, nil is returned. If the level was not embedded, it is fetched
// from the network.
func (lb *Leaderboard) Level(embeds string) (*Level, *Error) {
if lb.LevelData == nil {
return nil, nil
}
// we only have the level ID at hand
asserted, okay := lb.LevelData.(string)
if okay {
return LevelByID(asserted, embeds)
}
return toLevel(lb.LevelData, true), nil
}
// Platforms returns a list of all platforms that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Platforms() *PlatformCollection {
return toPlatformCollection(lb.PlatformsData)
}
// Regions returns a list of all regions that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Regions() *RegionCollection {
return toRegionCollection(lb.RegionsData)
}
// Variables returns a list of all variables that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Variables() *VariableCollection {
return toVariableCollection(lb.VariablesData)
}
// Players returns a list of all players that are present in the leaderboard.
// If they have not been embedded, an empty slice is returned.
func (lb *Leaderboard) Players() *PlayerCollection {
return toPlayerCollection(lb.PlayersData)
}
// for the 'hasLinks' interface
func (lb *Leaderboard) links() []Link {
return lb.Links
}
// LeaderboardOptions are the options that can be used to further narrow down a
// leaderboard to only a subset of runs.
type LeaderboardOptions struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// The platform ID to restrict the leaderboard to.
Platform string
// The region ID to restrict the leaderboard to.
Region string
// When set, can control if all or no runs are done on emulators.
Emulators OptionalFlag
// When set, can control if all or no runs are required to have a video.
VideoOnly OptionalFlag
// the timing method that should be used to compare runs; not all are
// allowed for all games, a server-side error will be returned if an invalid
// choice was made.
Timing TimingMethod
// ISO 8601 date; when given, only runs done before this date will be considered
Date string
// map of variable IDs to value IDs
Values map[string]string
}
// applyToURL merges the filter into a URL.
func (lo *LeaderboardOptions) applyToURL(u *url.URL) {
if lo == nil {
return
}
values := u.Query()
if lo.Top > 0 {
values.Set("top", strconv.Itoa(lo.Top))
}
if len(lo.Platform) > 0 {
values.Set("platform", lo.Platform)
}
if len(lo.Region) > 0 {
values.Set("region", lo.Region)
}
if len(lo.Timing) > 0 {
values.Set("timing", string(lo.Timing))
}
if len(lo.Date) > 0 {
values.Set("date", lo.Date)
}
lo.Emulators.applyToQuery("emulators", &values)
lo.VideoOnly.applyToQuery("video-only", &values)
for varID, valueID := range lo.Values {
values.Set("var-"+varID, valueID)
}
u.RawQuery = values.Encode()
}
// LeaderboardFilter represents the possible filtering options when fetching a
// list of leaderboards.
type LeaderboardFilter struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// If set, can be used to skip returning empty leaderboards.
SkipEmpty OptionalFlag
}
// applyToURL merges the filter into a URL.
leaderboard.go
// ... against each other
Timing TimingMethod
// the chosen variables (keys) and values (values) for the leaderboard, both
// given as their respective IDs
Values map[string]string
// the runs, sorted from best to worst
Runs []RankedRun
// API links to related resources
Links []Link
// do not use this field directly, use the available methods
PlatformsData interface{} `json:"platforms"`
// do not use this field directly, use the available methods
RegionsData interface{} `json:"regions"`
// do not use this field directly, use the available methods
GameData interface{} `json:"game"`
// do not use this field directly, use the available methods
CategoryData interface{} `json:"category"`
// do not use this field directly, use the available methods
LevelData interface{} `json:"level"`
// do not use this field directly, use the available methods
PlayersData interface{} `json:"players"`
// do not use this field directly, use the available methods
VariablesData interface{} `json:"variables"`
}
// RankedRun is a run with an assigned rank. As the rank only makes sense when
// a specific ruleset (video-only? realtime or ingame time? etc.) is applied,
// normal runs do not have a rank; only those in leaderboards have.
type RankedRun struct {
// the embedded run
Run Run
// the rank, starting at 1
Rank int
}
// leaderboardResponse models the actual API response from the server
type leaderboardResponse struct {
// the one leaderboard contained in the response
Data Leaderboard
}
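// The wrapper above mirrors the API envelope: the leaderboard sits under a
// top-level "data" key. A decoding sketch (the real fetchLeaderboard is not
// shown in this excerpt, so the body below and rawBody are assumptions):
//
//	var resp leaderboardResponse
//	if err := json.Unmarshal(rawBody, &resp); err != nil {
//	    // turn the decode failure into an *Error
//	}
//	leaderboard := &resp.Data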
// FullGameLeaderboard retrieves the leaderboard for a specific game and one of
// its full-game categories. An error is returned if no category is given or if
// a per-level category is given. If no game is given, it is fetched automatically,
// but if you have it already at hand, you can save one request by specifying it.
func FullGameLeaderboard(game *Game, cat *Category, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if cat.Type != "per-game" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a full-game category."}
}
if game == nil {
var err *Error
game, err = cat.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/category/" + cat.ID, options, nil, nil, embeds})
}
// LevelLeaderboard retrieves the leaderboard for a specific game and one of
// its levels in a specific category. An error is returned if no category or
// level is given or if a full-game category is given. If no game is given, it
// is fetched automatically, but if you have it already at hand, you can save
// one request by specifying it.
func LevelLeaderboard(game *Game, cat *Category, level *Level, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if level == nil {
return nil, &Error{"", "", ErrorBadLogic, "No level given."}
}
if cat.Type != "per-level" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a individual-level category."}
}
if game == nil {
var err *Error
game, err = level.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/level/" + level.ID + "/" + cat.ID, options, nil, nil, embeds})
}
// Game returns the game that the leaderboard is for. If it was not embedded, it
// is fetched from the network. Except for broken data on speedrun.com, this
// should never return nil.
func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {
// we only have the game ID at hand
asserted, okay := lb.GameData.(string)
if okay {
return GameByID(asserted, embeds)
}
return toGame(lb.GameData, true), nil
}
// Category returns the category that the leaderboard is for. If it was not
// embedded, it is fetched from the network. Except for broken data on
// speedrun.com, this should never return nil.
func (lb *Leaderboard) Category(embeds string) (*Category, *Error) {
// we only have the category ID at hand
asserted, okay := lb.CategoryData.(string)
if okay {
return CategoryByID(asserted, embeds)
}
return toCategory(lb.CategoryData, true), nil
}
// Level returns the level that the leaderboard is for. If it's a full-game
// leaderboard, nil is returned. If the level was not embedded, it is fetched
// from the network.
func (lb *Leaderboard) Level(embeds string) (*Level, *Error) {
if lb.LevelData == nil {
return nil, nil
}
// we only have the level ID at hand
asserted, okay := lb.LevelData.(string)
if okay {
return LevelByID(asserted, embeds)
}
return toLevel(lb.LevelData, true), nil
}
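// Illustrative note (not part of the original source): the *Data fields carry
// either a bare ID string or a fully embedded object, depending on the embeds
// requested, which is why the accessors above branch on a string type assertion.
func exampleEmbeddedVsLinked(cat *Category) {
	plain, _ := FullGameLeaderboard(nil, cat, nil, "")
	_, _ = plain.Game("") // GameData is an ID string here: triggers a GameByID call

	// "game" as an embed name is an assumption about the remote API
	embedded, _ := FullGameLeaderboard(nil, cat, nil, "game")
	_, _ = embedded.Game("") // GameData is an embedded object: decoded via toGame
}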
// Platforms returns a list of all platforms that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Platforms() *PlatformCollection {
return toPlatformCollection(lb.PlatformsData)
}
// Regions returns a list of all regions that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Regions() *RegionCollection {
return toRegionCollection(lb.RegionsData)
}
// Variables returns a list of all variables that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Variables() *VariableCollection {
return toVariableCollection(lb.VariablesData)
}
// Players returns a list of all players that are present in the leaderboard.
// If they have not been embedded, an empty slice is returned.
func (lb *Leaderboard) Players() *PlayerCollection {
return toPlayerCollection(lb.PlayersData)
}
// for the 'hasLinks' interface
func (lb *Leaderboard) links() []Link {
return lb.Links
}
// LeaderboardOptions are the options that can be used to further narrow down a
// leaderboard to only a subset of runs.
type LeaderboardOptions struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// The platform ID to restrict the leaderboard to.
Platform string
// The region ID to restrict the leaderboard to.
Region string
// When set, can control if all or no runs are done on emulators.
Emulators OptionalFlag
// When set, can control if all or no runs are required to have a video.
VideoOnly OptionalFlag
// the timing method that should be used to compare runs; not all are
// allowed for all games, a server-side error will be returned if an invalid
// choice was made.
Timing TimingMethod
// ISO 8601 date; when given, only runs done before this date will be considered
Date string
// map of variable IDs to value IDs
Values map[string]string
}
// applyToURL merges the filter into a URL.
func (lo *LeaderboardOptions) applyToURL(u *url.URL) {
if lo == nil {
return
}
values := u.Query()
if lo.Top > 0 {
values.Set("top", strconv.Itoa(lo.Top))
}
if len(lo.Platform) > 0 {
values.Set("platform", lo.Platform)
}
if len(lo.Region) > 0 {
values.Set("region", lo.Region)
}
if len(lo.Timing) > 0 {
values.Set("timing", string(lo.Timing))
}
if len(lo.Date) > 0 {
values.Set("date", lo.Date)
}
lo.Emulators.applyToQuery("emulators", &values)
lo.VideoOnly.applyToQuery("video-only", &values)
for varID, valueID := range lo.Values {
values.Set("var-"+varID, valueID)
}
u.RawQuery = values.Encode()
}
// LeaderboardFilter represents the possible filtering options when fetching a
// list of leaderboards.
type LeaderboardFilter struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// If set, can be used to skip returning empty leaderboards.
SkipEmpty OptionalFlag
}
// applyToURL merges the filter into a URL.
leaderboard.go
// ... runs against each other
Timing TimingMethod
// the chosen variables (keys) and values (values) for the leaderboard, both
// given as their respective IDs
Values map[string]string
// the runs, sorted from best to worst
Runs []RankedRun
// API links to related resources
Links []Link
// do not use this field directly, use the available methods
PlatformsData interface{} `json:"platforms"`
// do not use this field directly, use the available methods
RegionsData interface{} `json:"regions"`
// do not use this field directly, use the available methods
GameData interface{} `json:"game"`
// do not use this field directly, use the available methods
CategoryData interface{} `json:"category"`
// do not use this field directly, use the available methods
LevelData interface{} `json:"level"`
// do not use this field directly, use the available methods
PlayersData interface{} `json:"players"`
// do not use this field directly, use the available methods
VariablesData interface{} `json:"variables"`
}
// RankedRun is a run with an assigned rank. As the rank only makes sense when
// a specific ruleset (video-only? realtime or ingame time? etc.) is applied,
// normal runs do not have a rank; only those in leaderboards have.
type RankedRun struct {
// the embedded run
Run Run
// the rank, starting at 1
Rank int
}
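// Illustrative (not part of the original source): several runs can share a rank,
// so the number of runs and the number of places can differ.
func exampleCountPlaces(lb *Leaderboard) map[int]int {
	places := make(map[int]int)
	for _, ranked := range lb.Runs {
		places[ranked.Rank]++
	}
	return places // places[1] > 1 means the top spot is tied
}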
// leaderboardResponse models the actual API response from the server
type leaderboardResponse struct {
// the one leaderboard contained in the response
Data Leaderboard
}
// FullGameLeaderboard retrieves the leaderboard for a specific game and one of
// its full-game categories. An error is returned if no category is given or if
// a per-level category is given. If no game is given, it is fetched automatically,
// but if you have it already at hand, you can save one request by specifying it.
func FullGameLeaderboard(game *Game, cat *Category, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if cat.Type != "per-game" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a full-game category."}
}
if game == nil {
var err *Error
game, err = cat.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/category/" + cat.ID, options, nil, nil, embeds})
}
// LevelLeaderboard retrieves the leaderboard for a specific game and one of
// its levels in a specific category. An error is returned if no category or
// level is given or if a full-game category is given. If no game is given, it
// is fetched automatically, but if you have it already at hand, you can save
// one request by specifying it.
func LevelLeaderboard(game *Game, cat *Category, level *Level, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if level == nil {
return nil, &Error{"", "", ErrorBadLogic, "No level given."}
}
if cat.Type != "per-level" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a individual-level category."}
}
if game == nil {
var err *Error
game, err = level.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/level/" + level.ID + "/" + cat.ID, options, nil, nil, embeds})
}
// Game returns the game that the leaderboard is for. If it was not embedded, it
// is fetched from the network. Except for broken data on speedrun.com, this
// should never return nil.
func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {
// we only have the game ID at hand
asserted, okay := lb.GameData.(string)
if okay {
return GameByID(asserted, embeds)
}
return toGame(lb.GameData, true), nil
}
// Category returns the category that the leaderboard is for. If it was not
// embedded, it is fetched from the network. Except for broken data on
// speedrun.com, this should never return nil.
func (lb *Leaderboard) Category(embeds string) (*Category, *Error) {
// we only have the category ID at hand
asserted, okay := lb.CategoryData.(string)
if okay {
return CategoryByID(asserted, embeds)
}
return toCategory(lb.CategoryData, true), nil
}
// Level returns the level that the leaderboard is for. If it's a full-game
// leaderboard, nil is returned. If the level was not embedded, it is fetched
// from the network.
func (lb *Leaderboard) Level(embeds string) (*Level, *Error) {
if lb.LevelData == nil {
return nil, nil
}
// we only have the level ID at hand
asserted, okay := lb.LevelData.(string)
if okay {
return LevelByID(asserted, embeds)
}
return toLevel(lb.LevelData, true), nil
}
// Platforms returns a list of all platforms that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Platforms() *PlatformCollection {
return toPlatformCollection(lb.PlatformsData)
}
// Regions returns a list of all regions that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Regions() *RegionCollection {
return toRegionCollection(lb.RegionsData)
}
// Variables returns a list of all variables that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Variables() *VariableCollection {
return toVariableCollection(lb.VariablesData)
}
// Players returns a list of all players that are present in the leaderboard.
// If they have not been embedded, an empty slice is returned.
func (lb *Leaderboard) Players() *PlayerCollection {
return toPlayerCollection(lb.PlayersData)
}
// for the 'hasLinks' interface
func (lb *Leaderboard) links() []Link {
return lb.Links
}
// LeaderboardOptions are the options that can be used to further narrow down a
// leaderboard to only a subset of runs.
type LeaderboardOptions struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// The platform ID to restrict the leaderboard to.
Platform string
// The region ID to restrict the leaderboard to.
Region string
// When set, can control if all or no runs are done on emulators.
Emulators OptionalFlag
// When set, can control if all or no runs are required to have a video.
VideoOnly OptionalFlag
// the timing method that should be used to compare runs; not all are
// allowed for all games, a server-side error will be returned if an invalid
// choice was made.
Timing TimingMethod
// ISO 8601 date; when given, only runs done before this date will be considered
Date string
// map of variable IDs to value IDs
Values map[string]string
}
// applyToURL merges the filter into a URL.
func (lo *LeaderboardOptions) applyToURL(u *url.URL) {
if lo == nil {
return
}
values := u.Query()
if lo.Top > 0 {
values.Set("top", strconv.Itoa(lo.Top))
}
if len(lo.Platform) > 0 {
values.Set("platform", lo.Platform)
}
if len(lo.Region) > 0 {
values.Set("region", lo.Region)
}
if len(lo.Timing) > 0 {
values.Set("timing", string(lo.Timing))
}
if len(lo.Date) > 0 {
values.Set("date", lo.Date)
}
lo.Emulators.applyToQuery("emulators", &values)
lo.VideoOnly.applyToQuery("video-only", &values)
for varID, valueID := range lo.Values {
values.Set("var-"+varID, valueID)
}
u.RawQuery = values.Encode()
}
// LeaderboardFilter represents the possible filtering options when fetching a
// list of leaderboards.
type LeaderboardFilter struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. This value is ignored when set to anything else.
Top int
// If set, can be used to skip returning empty leaderboards.
SkipEmpty OptionalFlag
}
// applyToURL merges the filter into a URL.
cyclone.go
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
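// Illustrative (not part of the original source): the JSON field names follow
// the struct tags above; a partially filled event marshals roughly like this.
func exampleAlarmEventJSON() ([]byte, error) {
	ev := AlarmEvent{
		EventID: "0123",
		Level:   3,
		Check:   "cyclone(cpu.usage.percent)",
	}
	// yields {"source":"","event_id":"0123",...,"level":3,...,"check":"cyclone(cpu.usage.percent)",...}
	return json.Marshal(ev)
}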
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
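// The runloop/drainloop pair above is the usual two-phase stop: first react to
// Shutdown, then keep reading Input until the producer closes it. A minimal
// standalone sketch of the same shape (names here are illustrative only):
func drainAfterShutdown(input <-chan int, shutdown <-chan struct{}) {
	for {
		select {
		case <-shutdown:
			for range input { // drain until the producer closes input
			}
			return
		case _, ok := <-input:
			if !ok {
				return
			}
		}
	}
}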
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
if msg != nil {
go c.commit(msg)
}
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
cyclone.go
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
if msg != nil {
go c.commit(msg)
}
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
cyclone.go
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
if msg != nil {
go c.commit(msg)
}
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
cyclone.go
CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
if msg != nil {
go c.commit(msg)
}
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
server.rs
::net::tcp::TcpStream;
use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want custom scheduling, tracking clients, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
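// in_*: frames read from the client, handed to the application; out_*: frames the application wants written back to the client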
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String | let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
}
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w | {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
| identifier_body |
server.rs | ::net::tcp::TcpStream; | use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want to do custom scheduling, client tracking, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
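// For the sample key above this yields "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=" (RFC 6455, Section 1.3).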
let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
}
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w.headers | use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
| random_line_split |
server.rs | ::net::tcp::TcpStream;
use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want to do custom scheduling, client tracking, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
}
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn | (&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w | handle_possible_ws_request | identifier_name |
site.js | notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
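// clicking Hourly/Daily/Weekly/Monthly clears the chart and reloads it with the matching hard-coded demo dataset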
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
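// persist each switch's state in localStorage under the key 'toggle' + i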
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off") | {
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
} | conditional_block |
|
site.js | === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off"){
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
}
});
}
}
function toggleOff(i){
toggleButton[i].style.transform = "translateX(-43px)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "Off";
toggleText[i].style.transform = "translateX(25px)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#a8aad7";
toggleContainer[i].style.transition = ".25s";
}
function | toggleOn | identifier_name |
|
site.js | // var result = instantiatePage();
// var t1 = performance.now();
// console.log('Took', (t1 - t0).toFixed(4), 'milliseconds to generate:', result);
instantiatePage();
//Instantiate listeners, constructors
function instantiatePage(){
document.addEventListener("DOMContentLoaded", () => {
displayAlert(alertMessageText, 'general');
addAlertListener();
addTrafficTimeListener();
toggleSwitch();
addNotification(notificationText);
notificationListener();
globalClickListener();
deleteNotification();
globalKeyListener();
formListener();
setToggle();
addMessageListener();
//create array from user elements
let userArray = createArray(userName);
addSearchListener(userArray);
}); // end DOMContentLoaded
}
////////////////////////////////////////////////////////////////////
//global listener to click off notifications
function globalClickListener(){
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function
function globalKeyListener(){
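// 'count' tracks which search suggestion is currently highlighted; -1 means none yet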
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//figure notification container size
function sizeNotificationContainer(){
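// position the dropdown by viewport width: <400px pin it to the left edge, <500px center it under the bell, otherwise right-align it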
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} | ///////////////////////////////
//File performance
// var t0 = performance.now(); | random_line_split |
|
site.js |
function globalKeyListener(){
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//figure notification container size
function sizeNotificationContainer(){
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 20 | {
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function | identifier_body |
|
generator.go | typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collectAndGenerate gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the type of the constants we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a ValueSpec:
// a list of names possibly followed by a type, possibly followed by values.
// If the type and value are both missing, we carry down the type (and value,
// but the "go/types" package takes care of that).
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//We need to make sure that we get the type used right, with a package prefix
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.Buf, format, args...)
}
func (g *Generator) Print(output string) { | fmt.Fprint(&g.Buf, output)
}
//format returns the gofmt-ed contents of the Generator's buffer. | random_line_split |
|
generator.go | the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collectAndGenerate gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the type of the constants we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a ValueSpec:
// a list of names possibly followed by a type, possibly followed by values.
// If the type and value are both missing, we carry down the type (and value,
// but the "go/types" package takes care of that).
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//processTypeStr makes sure the type name is qualified with its package prefix and returns the import path it requires
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) | if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join | {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
} | identifier_body |
generator.go | the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
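// A minimal usage sketch (hypothetical names, not part of this package's API):
// a GeneratorFunc that emits one getter per collected field for a type "User" in ./models.
//
//	var g Generator
//	emit := func(typeName string, fields []Field, imports []Import) {
//		for _, f := range fields {
//			g.Printf("func (x %s) Get%s() %s { return x.%s }\n", typeName, f.Name, f.TypeName, f.Name)
//		}
//	}
//	_ = g.Run([]string{"./models"}, "User", "", emit) // writes ./models/user_properties.go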
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collectAndGenerate gathers, from all of the package's files, the field and import
//info for the named type and then hands it to genFn
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a name, possibly followed by a type definition. We only care about the
// struct type whose name matches the one we were asked to process.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//processTypeStr makes sure the type name is qualified with its package prefix and returns the import path it requires
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func | (pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join | importExists | identifier_name |
generator.go | output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collectAndGenerate gathers, from all of the package's files, the field and import
//info for the named type and then hands it to genFn
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a name, possibly followed by a type definition. We only care about the
// struct type whose name matches the one we were asked to process.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" |
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//processTypeStr makes sure the type name is qualified with its package prefix and returns the import path it requires
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
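// Illustrative inputs and outputs (a sketch; the import paths are just examples):
//
//	processTypeStr("github.com/aws/aws-sdk-go/aws.Config") // -> ("aws.Config", "github.com/aws/aws-sdk-go/aws")
//	processTypeStr("time.Time")                            // -> ("time.Time", "time")
//	processTypeStr("int64")                                // -> ("int64", "")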
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join | {
continue
} | conditional_block |
s3driver.go | err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix {
continue
}
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) DeleteDir(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error |
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
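// Note that S3 has no native rename, so Rename above is a copy followed by a delete.
// Sketch with a hypothetical driver value d and illustrative keys:
//
//	err := d.Rename("/reports/2020.csv", "/archive/2020.csv")
//	// copies <bucket>/<prefix>/reports/2020.csv to <bucket>/<prefix>/archive/2020.csv,
//	// then deletes the original; err is non-nil if either step fails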
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
| {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
} | identifier_body |
s3driver.go | 363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
d.lg.ErrorD("s3-get-file-blocked-ip", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
}
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
obj, err := d.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
})
if err != nil {
return nil, err
}
if d.lg != nil {
d.lg.InfoD("s3-get-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": obj.ContentLength,
})
}
return obj.Body, nil
}
func (d S3Driver) PutFile(path string, r io.Reader) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
rawData, err := ioutil.ReadAll(r)
if err != nil {
return err
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader(rawData),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
if err != nil {
return err
}
ip, port := getIPAndPort(d.remoteIPAddress)
if d.lg != nil {
d.lg.InfoD("s3-put-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "PUT",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": bytes.NewReader(rawData).Size(),
})
}
return nil
}
func (d S3Driver) RealPath(path string) string {
result, _ := TranslatePath("/", d.homePath, path)
return "/" + result
}
// TranslatePath takes in an S3 root prefix, a home directory, and either an absolute or relative path to append, and returns a cleaned and validated path.
// It will resolve things like '..' while disallowing the prefix to be escaped.
// It also preserves a single trailing slash if one is present, so it can be used on both directories and files.
func TranslatePath(prefix, home, path string) (string, error) {
if path == "" {
return filepath.Clean("/" + prefix + "/" + home), nil
}
var cleanPath string
if strings.HasPrefix(path, "/") {
cleanPath = filepath.Clean(prefix + path)
if !strings.HasPrefix(cleanPath, prefix) {
cleanPath = prefix
}
} else {
cleanPath = filepath.Clean("/" + prefix + "/" + home + filepath.Clean("/"+path))
}
// filepath.Clean drops trailing /'s, so if the input had one we have to put it back
if strings.HasSuffix(path, "/") {
cleanPath += "/"
}
return strings.TrimLeft(cleanPath, "/"), nil
}
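// A few illustrative cases, assuming prefix "district1" and home "inbox":
//
//	TranslatePath("district1", "inbox", "reports/")        // "district1/inbox/reports/" (trailing slash kept)
//	TranslatePath("district1", "inbox", "/other/file.csv") // "district1/other/file.csv" (absolute paths skip home)
//	TranslatePath("district1", "inbox", "/../../etc")      // "district1" (".." cannot escape the prefix)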
// NewS3Driver creates a new S3Driver with the AWS credentials and S3 parameters.
// bucket: name of S3 bucket
// prefix: key within the S3 bucket, if applicable
// homePath: default home directory for user (can be different from prefix)
func NewS3Driver(
bucket,
prefix,
homePath,
region,
awsAccessKeyID,
awsSecretKey,
awsToken,
remoteIPAddress string,
kmsKeyID *string,
lg Logger,
) *S3Driver {
config := aws.NewConfig().
WithRegion(region).
WithCredentials(credentials.NewStaticCredentials(awsAccessKeyID, awsSecretKey, awsToken))
s3 := s3.New(session.New(), config)
blockDownloadIPAddressesStr := os.Getenv("BLOCK_DOWNLOADS_IP_ADDRESSES")
BLOCK_DOWNLOADS_IP_ADDRESSES = []string{}
for _, addr := range strings.Split(blockDownloadIPAddressesStr, ",") {
BLOCK_DOWNLOADS_IP_ADDRESSES = append(BLOCK_DOWNLOADS_IP_ADDRESSES, strings.TrimSpace(addr))
}
return &S3Driver{
s3: s3,
bucket: bucket,
prefix: prefix,
homePath: homePath,
remoteIPAddress: remoteIPAddress,
kmsKeyID: kmsKeyID,
lg: lg,
}
}
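// Construction sketch; the bucket, prefix, region, address and logger below are placeholders:
//
//	drv := NewS3Driver(
//		"sftp-bucket", "district1", "inbox", "us-east-1",
//		os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), "",
//		"203.0.113.10:52100",
//		nil, // nil kmsKeyID selects AES256 server-side encryption
//		lg,
//	)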
| func getIPAndPort(combined string) (string, string) { | random_line_split |
|
s3driver.go | , err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix {
continue
}
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) | (path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
}
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
| DeleteDir | identifier_name |
s3driver.go | err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix |
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) DeleteDir(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
}
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
| {
continue
} | conditional_block |
app.js | (Math.random() * 10);
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
// print every product in the array
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
// filter event
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
// search
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
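// The handoff to products.html happens through localStorage: dataSearch() saves the
// matches under "searchProducts" and sets filterActive to 4, which sortFilter below
// reads back, e.g. JSON.parse(localStorage.getItem("searchProducts")) -> matching products.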
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
if (n == 3) {
dataProducts.sort(function(data1, data2) {
return data1.price - data2.price;
});
pushProduct(dataProducts);
}
if (n == 4) {
var products = JSON.parse(localStorage.getItem("searchProducts"));
pushProduct(products);
} else {
pushProduct(dataProducts, 30);
}
}
// event when the cart icon is clicked
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// registration
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // at least 6 characters, no special characters
var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; // at least 8 characters and at least 1 digit
var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // each word starts with a capital letter and both family and given name are required
var regPhone = /[0-9]{10}/; // the phone number must be numeric and 10 digits long
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau");
var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt");
var lbEmail = document.querySelector("#lbEmail");
var lbNgaySinh = document.querySelector("#lbNgaySinh");
if (!regUserName.test(data.username)) {
lbUserName.innerText = "Tên đăng nhập ít nhất phải có 6 ký tự không chứa ký tự đặc biệt";
return false;
}
lbUserName.innerText = "";
if (!regPassword.test(data.password)) {
lbMatKhau.innerText = "Mật khẩu phải có 8 ký tự trở lên và có ít nhất 1 số";
return false;
}
lbMatKhau.innerText = "";
if (data.password !== data.kh_nhaplaimatkhau) {
lbNhapLaiMatKhau.innerText = "Mật khẩu chưa khớp";
return false;
}
lbNhapLaiMatKhau.innerText = "";
if (!regName.test(data.kh_ten)) {
lbTen.innerText = "Chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên";
return false;
}
lbTen.innerText = "";
if (data.kh_diachi.trim().length == 0) {
lbDiaChi.innerText = "Địa chỉ không được bỏ trống";
return false;
}
lbDiaChi.innerText = "";
if (!regPhone.test(data.kh_dienthoai)) {
lbDt.innerText = "số điện thoại phải là số và có 10 chữ số ";
return false;
}
lbDt.innerText = "";
if (!regEmail.test(data.kh_email)) {
lbEmail.innerText = "vui lòng điền đúng định dạng email";
return false;
}
lbEmail.innerText = "";
if (data.kh_namsinh > 2002) {
lbNgaySinh.innerText = "bạn phải đủ 18 tuổi";
return false;
}
lbNgaySinh.innerText = "";
return true;
}
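// A sketch of form input that passes every rule above (all values are made up):
// username "nguyenvana1" (>= 6 chars, no special characters), password "matkhau99"
// entered twice (>= 8 chars with a digit), kh_ten "Nguyen Van An" (each word
// capitalised, family + given name), a non-empty kh_diachi, kh_dienthoai "0912345678"
// (10 digits), kh_email "vana@example.com", and kh_namsinh 2002 or earlier.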
// fill in the user's info
var getThongTin = function(user) {
document.getElementById("kh_ten").value = user.kh_ten;
document.getElementById("kh_gioitinh").value = user.kh_gioitinh == 0 ? "Nam" : "Nữ";
document.getElementById("kh_diachi").value = user.kh_diachi;
document.getElementById("kh_dienthoai").value = user.kh_dienthoai;
document.getElementById("kh_email").value = user.kh_email;
document.getElementById("kh_ngaysinh").value = user.kh_ngaysinh + "/" + user.kh_thangsinh + "/" + user.kh_namsinh;
}
// checkout section (paying.html)
var pay = function() {
// get the products from the user
var list = document.getElementById("listProductPay");
var product = userCartList(userNow);
var total = 0;
for (var p of product) {
var item = `
<li class="list-group-item d-flex justify-content-between ">
<div>
<h4 class="my-0">${p.name}</h4>
<small class="text-muted">${p.price} x ${p.orderQty} </small>
</div>
<span class="text-muted">${p.orderQty}</span>
</li>
`;
list.innerHTML += item;
total += p.price * p.orderQty;
}
var totalPrice = `
<li class="list-group-item d-flex justify-content-between">
<span>Tổng thành tiền</span>
<strong id="thanhTien">${total}</strong>
</li>
`;
list.innerHTML += totalPrice;
}
// event when a product is clicked
var getProductId = function() {
var a = document.getElementsByClassName("sale__item-link");
for (var i = 0; i < a.length; i++) {
a[i].addEventListener("click", func | tion(e) {
e.preventDefault();
var productID = this.id;
w | conditional_block |
|
app.js | còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// default cart list
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// check the current login state
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var bool = Boolean(checkLogin());
var userNow = checkLogin();
console.log(bool);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// add a product row to the cart table
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img src="${product.url}" class="img-product">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// remove one cart item
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// when a product quantity changes
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// total price
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
for (let i = 0; i < table.rows.length; ++i) { // let so each delete handler captures its own row index
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// get the products in a user's cart
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang | ) {
var product = dataProducts.find(function(value) {
return value.id == item.id;
});
product.orderQ | identifier_body |
|
app.js | -5.jpg",
price: 259000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cmTúi Kẹp nách nữ có kích thước: Dài 26 cm, Rộng 6 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 7 màu Hot (Trắng Caro, Xanh Caro, Đỏ Caro, Xám Caro, Tím Hồng, Vàng Xanh, Đen Xanh) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-6",
name: "[Mã FAMAYWA giảm 10k đơn từ 50k] Túi xách nữ, túi kẹp nách da mềm trơn BH 433",
url: "../assets/img/items/item-6.jpg",
price: 299000,
describe1: "Túi xách là một phụ kiện thời trang không thể thiếu của các bạn trẻ dù là nam hay nữ, nó thể hiện phong cách cũng như cá tính của chủ nhân.",
describe2: "Nếu bạn yêu thích phong cách đơn giản nhưng tinh tế thì chiếc túi xách là một lựa chọn hoàn hảo cho bạn.Chiếc túi xách Tote 6 sở hữu thiết kế thời trang với phần nắp túi cách điệu kết hợp tông màu nổi bật, những đường may tinh tế, cùng gam màu trung tính trẻ trung năng động sẽ vô cùng hữu dụng cho bạn trong việc chứa các vật dụng cá nhân.",
describe3: " Bên cạnh đó, túi xách còn thể hiện gu thời trang và cá tính của bạn.",
orderQty: 2
},
{
id: "item-7",
name: "Túi Cói Merci",
url: "../assets/img/items/item-7.jpg",
price: 599000,
describe1: "Túi Cói Merci - nhỏ nhỏ xinh xinh nhưng tiện vô cùng . Sống ảo cũng xinh lung linhh ✨✨🔥🔥 ",
describe2: "Để mà đi du lịch sống ảo k thể thiếu em túi này lun ý ce ạ 🤩" +
"TÚI CÓI MERCI hot hit 🌴🌴🌴" +
"Túi rộng tha hồ đựng đồ nha ce",
describe3: "Size loại 1: 35x36cm" +
"size loại 2: 32x35cm,đựng vừa A4, vừa laptop, đi học đi làm , du lịch , còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// default cart list
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// check the current login state
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var bool = Boolean(checkLogin());
var userNow = checkLogin();
console.log(bool);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// add a product row to the cart table
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img sr | c="${product | identifier_name |
|
app.js | ">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// remove one cart item
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
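// Usage sketch (illustrative ids): remove the cart entry whose id is "item-2".
// var cart = [{ id: "item-1", n: 3 }, { id: "item-2", n: 1 }];
// removeByAttr(cart, "id", "item-2"); // cart is now [{ id: "item-1", n: 3 }]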
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// when a product quantity changes
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// total price
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
for (let i = 0; i < table.rows.length; ++i) { // let so each delete handler captures its own row index
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// helper that builds the product list from a user's cart
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang) {
var product = dataProducts.find(function(value) {
return value.id == item.id;
});
product.orderQty = item.n;
products.push(product)
}
}
return products;
}
// add the user's cart products to the table
// userCartList(users[0])
var addProduct = function(products) {
var prd = products(checkLogin());
if (prd) {
for (var product of prd) {
addRow(product);
}
totalPrice();
return true;
}
return false;
}
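// Note: addProduct takes a function (such as userCartList above), not an
// array; it calls products(checkLogin()) to resolve the logged-in user's
// cart items before rendering the rows and totals.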
// end of add-product section
// all products
var pushProduct = function(dataProducts, n) {
var productList = document.getElementById("listProducts");
var products = [];
    // render n randomly chosen products
if (n) {
setTimeout(function() {
for (let i = 0; i < n; ++i) {
let k = Math.floor(Math.random() * 10);
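                // Note: this samples only from the first 10 entries of
                // dataProducts; use dataProducts.length instead of 10 to draw
                // from the whole catalogue.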
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
        // render every product in the array
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
// filter event
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
// search
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
    if (n == 3) {
        dataProducts.sort(function(data1, data2) {
            return data1.price - data2.price;
        });
        pushProduct(dataProducts);
    } else if (n == 4) {
        var products = JSON.parse(localStorage.getItem("searchProducts"));
        pushProduct(products);
    } else {
        pushProduct(dataProducts, 30);
    }
}
// event when the cart link is clicked
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// registration
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
    var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // at least 6 word characters, no special characters
    var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; // at least 8 characters, including at least one digit and one lowercase letter
    var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
    var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // each word must start with an uppercase letter; both family and given names are required
    var regPhone = /[0-9]{10}/; // phone number must be numeric and 10 digits long
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau"); | var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt"); | random_line_split |
|
x25519.rs | }
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
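    // The scalar multiplication below is the constant-time Montgomery ladder
    // over x-coordinates (RFC 7748): for each scalar bit it conditionally
    // swaps the working points (x2:z2) and (x3:z3) and performs one combined
    // differential double-and-add, so the sequence of field operations does
    // not depend on the secret scalar.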
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() |
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
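        // RFC 7748 clamping: clear the three low bits so the scalar is a
        // multiple of the cofactor 8, clear the top two bits and set bit 254
        // so the position of the highest set bit is fixed for every scalar.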
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
/// Convert an Ed25519 secret key to a X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
/// Convert an Ed25519 public key to a X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> | {
return Err(Error::InvalidSecretKey);
} | conditional_block |
x25519.rs | (dh: DHOutput) -> Self {
PublicKey(dh.0)
}
}
impl From<DHOutput> for SecretKey {
fn from(dh: DHOutput) -> Self {
SecretKey(dh.0)
}
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
/// Convert an Ed25519 secret key to a X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
| from | identifier_name |
|
x25519.rs | _))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
/// Convert an Ed25519 secret key to a X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
/// Convert an Ed25519 public key to a X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> {
let pk = PublicKey::from_slice(
&edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?,
)?;
pk.clear_cofactor()?;
Ok(pk)
}
}
impl KeyPair {
/// Convert an Ed25519 key pair to a X25519 key pair.
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> {
let pk = PublicKey::from_ed25519(&edkp.pk)?;
let sk = SecretKey::from_ed25519(&edkp.sk)?;
Ok(KeyPair { pk, sk })
}
}
}
| #[cfg(not(feature = "disable-signatures"))]
pub use from_ed25519::*;
#[test]
fn test_x25519() { | random_line_split |
|
nasdaq_itch_vwap.py | def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
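                    # ITCH price fields are unsigned integers with four implied
                    # decimal places, hence the division by 1e4 here and below.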
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
|
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
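# Hourly VWAP for a stock = sum(price * shares) / sum(shares) over the trades
# executed in that hour; the Product column computed below holds price * shares,
# the per-trade term of the numerator.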
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_ | message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro]) | conditional_block |
nasdaq_itch_vwap.py | def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
| sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_ | if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
| random_line_split |
nasdaq_itch_vwap.py | parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
| trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_2 | add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
| identifier_body |
nasdaq_itch_vwap.py | def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def | ():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_ | calculate_vwap | identifier_name |
physically_monotonic.rs | use std::collections::BTreeSet;
use std::cmp::Reverse;
use std::marker::PhantomData;
use differential_dataflow::lattice::Lattice;
use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc};
use mz_repr::{Diff, GlobalId, Row};
use timely::PartialOrder;
use crate::plan::interpret::{BoundedLattice, Context, Interpreter};
use crate::plan::join::JoinPlan;
use crate::plan::reduce::{KeyValPlan, ReducePlan};
use crate::plan::threshold::ThresholdPlan;
use crate::plan::top_k::TopKPlan;
use crate::plan::{AvailableCollections, GetPlan};
/// Represents a boolean physical monotonicity property, where the bottom value
/// is true (i.e., physically monotonic) and the top value is false (i.e. not
/// physically monotonic).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct PhysicallyMonotonic(pub bool);
impl BoundedLattice for PhysicallyMonotonic {
fn top() -> Self {
PhysicallyMonotonic(false)
}
fn bottom() -> Self {
PhysicallyMonotonic(true)
}
}
impl Lattice for PhysicallyMonotonic {
fn join(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 && other.0)
}
fn meet(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 || other.0)
}
}
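// With `bottom()` being true and `top()` being false, `join` (the least upper
// bound) is logical AND: a result is physically monotonic only if every joined
// input is. Dually, `meet` (the greatest lower bound) is logical OR.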
impl PartialOrder for PhysicallyMonotonic {
fn less_equal(&self, other: &Self) -> bool {
// We employ `Reverse` ordering for `bool` here to be consistent with
// the choice of `top()` being false and `bottom()` being true.
Reverse::<bool>(self.0) <= Reverse::<bool>(other.0)
}
}
/// Provides a concrete implementation of an interpreter that determines if
/// the output of `Plan` expressions is physically monotonic in a single-time
/// dataflow, potentially taking into account judgments about its inputs. We
/// note that in a single-time dataflow, expressions in non-recursive contexts
/// (i.e., outside of `LetRec` values) process streams that are at a minimum
/// logically monotonic, i.e., may contain retractions but would cease to do
/// so if consolidated. Detecting physical monotonicity, i.e., the absence
/// of retractions in a stream, enables us to disable forced consolidation
/// whenever possible.
#[derive(Debug)]
pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> {
monotonic_ids: &'a BTreeSet<GlobalId>,
_phantom: PhantomData<T>,
}
impl<'a, T> SingleTimeMonotonic<'a, T> {
/// Instantiates an interpreter for single-time physical monotonicity
/// analysis.
pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self {
SingleTimeMonotonic {
monotonic_ids,
_phantom: Default::default(),
}
}
}
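// Sketch of intended use (not a verbatim call sequence from this crate):
// collect the GlobalIds of sources and indexes known to produce monotonic
// data, build `SingleTimeMonotonic::new(&ids)`, and drive it over a `Plan`
// with the generic `Interpreter` machinery from `plan::interpret`; each
// operator then reports whether its output may skip forced consolidation.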
impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> {
type Domain = PhysicallyMonotonic;
fn constant(
&self,
_ctx: &Context<Self::Domain>,
rows: &Result<Vec<(Row, T, Diff)>, EvalError>,
) -> Self::Domain {
// A constant is physically monotonic iff the constant is an `EvalError`
// or all its rows have `Diff` values greater than zero.
PhysicallyMonotonic(
rows.as_ref()
.map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)),
)
}
fn | (
&self,
ctx: &Context<Self::Domain>,
id: &Id,
_keys: &AvailableCollections,
_plan: &GetPlan,
) -> Self::Domain {
// A get operator yields physically monotonic output iff the corresponding
// `Plan::Get` is on a local or global ID that is known to provide physically
        // monotonic input. The way this becomes known is through the interpreter itself
// for non-recursive local IDs or through configuration for the global IDs of
// monotonic sources and indexes. Recursive local IDs are always assumed to
// break physical monotonicity.
// TODO(vmarcos): Consider in the future if we can ascertain whether the
// restrictions on recursive local IDs can be relaxed to take into account only
// the interpreter judgement directly.
PhysicallyMonotonic(match id {
Id::Local(id) => ctx
.bindings
.get(id)
.map_or(false, |entry| !entry.is_rec && entry.value.0),
Id::Global(id) => self.monotonic_ids.contains(id),
})
}
fn mfp(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_mfp: &MapFilterProject,
_input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn flat_map(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_func: &TableFunc,
_exprs: &Vec<MirScalarExpr>,
_mfp: &MapFilterProject,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn join(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_plan: &JoinPlan,
) -> Self::Domain {
// When we see a join, we must consider that the inputs could have
// been `Plan::Get`s on arrangements. These are not in general safe
// wrt. producing physically monotonic data. So here, we conservatively
        // judge the output of a join to be physically monotonic iff all
// inputs are physically monotonic.
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn reduce(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_key_val_plan: &KeyValPlan,
_plan: &ReducePlan,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a recursive context, reduce will advance across timestamps
// and may need to retract. Outside of a recursive context, the
// fact that the dataflow is single-time implies no retraction
// is emitted out of reduce. This makes the output be physically
// monotonic, regardless of the input judgment. All `ReducePlan`
// variants behave the same in this respect.
PhysicallyMonotonic(!ctx.is_rec)
}
fn top_k(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_top_k_plan: &TopKPlan,
) -> Self::Domain {
// Top-k behaves like a reduction, producing physically monotonic
// output when exposed to a single time (i.e., when the context is
// non-recursive). Note that even a monotonic top-k will consolidate
// if necessary to ensure this property.
PhysicallyMonotonic(!ctx.is_rec)
}
fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain {
// Negation produces retractions, so it breaks physical monotonicity.
PhysicallyMonotonic(false)
}
fn threshold(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_threshold_plan: &ThresholdPlan,
) -> Self::Domain {
// Thresholding is a special kind of reduction, so the judgment
// here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec)
}
fn union(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_consolidate_output: bool,
) -> Self::Domain {
// Union just concatenates the inputs, so is physically monotonic iff
// all inputs are physically monotonic.
// (Even when we do consolidation, we can't be certain that a negative diff from an input
// is actually cancelled out. For example, Union outputs negative diffs when it's part of
// the EXCEPT pattern.)
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn arrange_by(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_forms: &AvailableCollections,
_input_key: &Option<Vec<MirScalarExpr>>,
_input_mfp: &MapFilterProject,
) -> Self::Domain {
// `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it
// makes sure that the requested `forms` are present and builds them only
// if not already | get | identifier_name |
physically_monotonic.rs | use std::collections::BTreeSet;
use std::cmp::Reverse;
use std::marker::PhantomData;
use differential_dataflow::lattice::Lattice;
use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc};
use mz_repr::{Diff, GlobalId, Row};
use timely::PartialOrder;
use crate::plan::interpret::{BoundedLattice, Context, Interpreter};
use crate::plan::join::JoinPlan;
use crate::plan::reduce::{KeyValPlan, ReducePlan};
use crate::plan::threshold::ThresholdPlan;
use crate::plan::top_k::TopKPlan;
use crate::plan::{AvailableCollections, GetPlan};
/// Represents a boolean physical monotonicity property, where the bottom value
/// is true (i.e., physically monotonic) and the top value is false (i.e. not
/// physically monotonic).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct PhysicallyMonotonic(pub bool);
impl BoundedLattice for PhysicallyMonotonic {
fn top() -> Self {
PhysicallyMonotonic(false)
}
fn bottom() -> Self {
PhysicallyMonotonic(true)
}
}
impl Lattice for PhysicallyMonotonic {
fn join(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 && other.0)
}
fn meet(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 || other.0)
}
}
impl PartialOrder for PhysicallyMonotonic {
fn less_equal(&self, other: &Self) -> bool {
// We employ `Reverse` ordering for `bool` here to be consistent with
// the choice of `top()` being false and `bottom()` being true.
Reverse::<bool>(self.0) <= Reverse::<bool>(other.0)
}
}
/// Provides a concrete implementation of an interpreter that determines if
/// the output of `Plan` expressions is physically monotonic in a single-time
/// dataflow, potentially taking into account judgments about its inputs. We
/// note that in a single-time dataflow, expressions in non-recursive contexts
/// (i.e., outside of `LetRec` values) process streams that are at a minimum
/// logically monotonic, i.e., may contain retractions but would cease to do
/// so if consolidated. Detecting physical monotonicity, i.e., the absence
/// of retractions in a stream, enables us to disable forced consolidation
/// whenever possible.
#[derive(Debug)]
pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> {
monotonic_ids: &'a BTreeSet<GlobalId>,
_phantom: PhantomData<T>,
}
impl<'a, T> SingleTimeMonotonic<'a, T> {
/// Instantiates an interpreter for single-time physical monotonicity
/// analysis.
pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self {
SingleTimeMonotonic {
monotonic_ids,
_phantom: Default::default(),
}
}
}
impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> {
type Domain = PhysicallyMonotonic;
fn constant(
&self,
_ctx: &Context<Self::Domain>,
rows: &Result<Vec<(Row, T, Diff)>, EvalError>,
) -> Self::Domain {
// A constant is physically monotonic iff the constant is an `EvalError`
// or all its rows have `Diff` values greater than zero.
PhysicallyMonotonic(
rows.as_ref()
.map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)),
)
}
fn get(
&self,
ctx: &Context<Self::Domain>,
id: &Id,
_keys: &AvailableCollections,
_plan: &GetPlan,
) -> Self::Domain {
// A get operator yields physically monotonic output iff the corresponding
// `Plan::Get` is on a local or global ID that is known to provide physically
        // monotonic input. The way this becomes known is through the interpreter itself
// for non-recursive local IDs or through configuration for the global IDs of
// monotonic sources and indexes. Recursive local IDs are always assumed to
// break physical monotonicity.
// TODO(vmarcos): Consider in the future if we can ascertain whether the
// restrictions on recursive local IDs can be relaxed to take into account only
// the interpreter judgement directly.
PhysicallyMonotonic(match id {
Id::Local(id) => ctx
.bindings
.get(id)
.map_or(false, |entry| !entry.is_rec && entry.value.0),
Id::Global(id) => self.monotonic_ids.contains(id),
})
}
fn mfp(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_mfp: &MapFilterProject,
_input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn flat_map(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_func: &TableFunc,
_exprs: &Vec<MirScalarExpr>,
_mfp: &MapFilterProject,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn join(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_plan: &JoinPlan,
) -> Self::Domain {
// When we see a join, we must consider that the inputs could have
// been `Plan::Get`s on arrangements. These are not in general safe
// wrt. producing physically monotonic data. So here, we conservatively
// judge the output of a join to be physically monotonic iff all
// inputs are physically monotonic.
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn reduce(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_key_val_plan: &KeyValPlan,
_plan: &ReducePlan,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a recursive context, reduce will advance across timestamps
// and may need to retract. Outside of a recursive context, the
// fact that the dataflow is single-time implies no retraction
// is emitted out of reduce. This makes the output be physically
// monotonic, regardless of the input judgment. All `ReducePlan`
// variants behave the same in this respect.
PhysicallyMonotonic(!ctx.is_rec)
}
fn top_k(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_top_k_plan: &TopKPlan,
) -> Self::Domain {
// Top-k behaves like a reduction, producing physically monotonic
// output when exposed to a single time (i.e., when the context is
// non-recursive). Note that even a monotonic top-k will consolidate
// if necessary to ensure this property.
PhysicallyMonotonic(!ctx.is_rec)
}
fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain {
// Negation produces retractions, so it breaks physical monotonicity.
PhysicallyMonotonic(false)
}
fn threshold(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_threshold_plan: &ThresholdPlan,
) -> Self::Domain {
// Thresholding is a special kind of reduction, so the judgment
// here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec)
}
fn union(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_consolidate_output: bool,
) -> Self::Domain {
// Union just concatenates the inputs, so is physically monotonic iff
// all inputs are physically monotonic.
// (Even when we do consolidation, we can't be certain that a negative diff from an input
// is actually cancelled out. For example, Union outputs negative diffs when it's part of
// the EXCEPT pattern.)
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
| &self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_forms: &AvailableCollections,
_input_key: &Option<Vec<MirScalarExpr>>,
_input_mfp: &MapFilterProject,
) -> Self::Domain {
// `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it
// makes sure that the requested `forms` are present and builds them only
// if not already available | fn arrange_by( | random_line_split |
spinning_table_states.py | ):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
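# Move duration is dictated by the most constrained joint: the inf-norm takes
# the largest |delta| / velocity_limit over all joints.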
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
| self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
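# Wrap angle into [center_point - pi, center_point + pi].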
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius, height)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.c | self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
| random_line_split |
spinning_table_states.py | (object):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
|
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius, height)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cyl | angle = angle + 2*math.pi | conditional_block |
spinning_table_states.py | .1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius, height)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cylinders[i][0].header.stamp.to_sec() - userdata.cylinders[0][0].header.stamp.to_sec()
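# Least-squares circle fit: expanding (x - xc)^2 + (y - yc)^2 = rho^2 gives
# x^2 + y^2 + a0*x + a1*y + a2 = 0, so solving A [a0 a1 a2]^T = -(x^2 + y^2)
# below yields xc = -a0/2, yc = -a1/2 and rho = sqrt((a0^2 + a1^2)/4 - a2).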
A = hstack([x, y, mat(ones(x.shape))])
b = -(power(x,2)+power(y,2))
a = asarray(linalg.lstsq(A,b)[0])
xc = -.5 * a[0];
yc = -.5 * a[1];
zc = mean(z)
center_radius = sqrt((a[0]**2+a[1]**2)/4-a[2])
object_radius = mean(r)
object_height = mean(h)
R = center_radius + object_radius
middle_ind = round(len(userdata.cylinders)/2.)
print "len %d middle ind %d" % (len(userdata.cylinders),middle_ind)
middle_angle = math.atan2(y[middle_ind,0]-yc,x[middle_ind,0]-xc)
angles = mat(ones((len(userdata.cylinders),1)))
print x.shape, y.shape, len(userdata.cylinders)
for i in range(min([len(userdata.cylinders), len(x), len(y)])):
angles[i,0] = fix_angle(math.atan2(y[i,0]-yc,x[i,0]-xc),middle_angle)
# prev_angle = angles[0,0]
# for i in range(len(userdata.cylinders)):
# while angles[i,0] < prev_angle:
# angles[i,0] = angles[i,0] + 2*math.pi
# prev_angle = angles[i,0]
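# Fit angle(t) ~= slope * t + offset by least squares; the rotation rate w
# reported below is the negated slope (presumably the measured angle decreases
# over time for this spin direction).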
A_angles = hstack([times_mat,mat(ones(angles.shape))])
#print hstack([A_angles,angles])
w_result = asarray(linalg.lstsq(A_angles,angles)[0])
w = -w_result[0]
print 'rotation rate: %.3f rad/s - one revolution in %.2f sec' % (w,2*math.pi/w)
#w = 2 * math.pi / 30.
userdata.center = Point(xc,yc,zc)
userdata.radius = R
userdata.object_radius = object_radius
userdata.object_height = object_height
userdata.rotation_rate = w
userdata.init_angle = math.atan2(y[0,0]-yc,x[0,0]-xc)
userdata.init_time = times[0]
polygon_pub = rospy.Publisher('/fit_circle', geometry_msgs.msg.PolygonStamped)
polygon1 = PolygonStamped()
polygon1.header.stamp = rospy.Time.now()
polygon1.header.frame_id = 'base_footprint'
polygon2 = PolygonStamped()
polygon2.header.stamp = rospy.Time.now()
polygon2.header.frame_id = 'base_footprint'
for angle in linspace(0,2*math.pi,16):
pt1 = Point(xc+center_radius*math.cos(angle),yc+center_radius*math.sin(angle),zc)
pt2 = Point(xc+R*math.cos(angle),yc+R*math.sin(angle),zc)
polygon1.polygon.points.append(pt1)
polygon2.polygon.points.append(pt2)
polygon_pub.publish(polygon1)
polygon_pub.publish(polygon2)
print 'got center (%.3f,%.3f,%.3f), radius %.3f + %.3f = %.3f' % (xc,yc,zc,center_radius,object_radius,R)
return "success"
class | ExecuteGrasp | identifier_name |
|
spinning_table_states.py | ):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
| joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius, height)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.c | L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
| identifier_body |
action.py | Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
) | f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used to for the four primary
usecases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, | elif field == "sources":
ret = []
for source in value:
tag = ( | random_line_split |
action.py | Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used to for the four primary
usecases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
|
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, | return self.actions == other.actions | conditional_block |
action.py | :
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def | (self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used to for the four primary
usecases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, | listify | identifier_name |
action.py | Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
|
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used to for the four primary
usecases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
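# The rendered markup is roughly (illustrative; meta fields become <table> attributes):
#   <div id="actions">
#    <table ...>
#     <tr><td class="field-key">field</td><td class="field-value">value</td></tr>
#     ...
#    </table>
#   </div>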
@classmethod
def read_from_md(cls, | """ Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value | identifier_body |
web.rs | !("Listening on 0.0.0.0:8080");
rouille::start_server("0.0.0.0:8080", |request| {
rouille::log(request, io::stderr(), || {
let conn = &db::connection();
let user_model = &UserModel::new(conn);
let user_service = &UserService::new(user_model, b"....");
router!(request,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct Status<'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
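/// For example (illustrative values), a password grant posts a form body like
/// `grant_type=password&username=alice&password=secret`, and a refresh grant posts
/// `grant_type=refresh_token&refresh_token=<token>`.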
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires an `Authorization: Bearer {access_token}` header to make the request
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
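// e.g. a header of `Authorization: Bearer abc123` yields access_token == "abc123";
// a missing or malformed header falls back to the empty string here.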
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> |
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password | {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
} | identifier_body |
web.rs | !("Listening on 0.0.0.0:8080");
rouille::start_server("0.0.0.0:8080", |request| {
rouille::log(request, io::stderr(), || {
let conn = &db::connection();
let user_model = &UserModel::new(conn);
let user_service = &UserService::new(user_model, b"....");
router!(request,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct | <'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires an `Authorization: Bearer {access_token}` header to make the request
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
}
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password". | Status | identifier_name |
web.rs | ,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct Status<'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires an `Authorization: Bearer {access_token}` header to make the request
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
}
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password".into()),
]).unwrap(),
user::PasswordGrantRequest {
username: "test-user".into(),
password: "test-password".into(),
}
);
assert_eq!(
form_to_password_grant(&vec![]).unwrap_err(),
WebError::MissingUsername
);
assert_eq!(
form_to_password_grant(&vec![("username".into(), "test-user".into())]).unwrap_err(), | WebError::MissingPassword | random_line_split |
|
chmod.rs |
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};
use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;
const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn stdout(data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1) | .validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not | random_line_split |
|
chmod.rs | // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};
use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;
const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn | (data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1)
.validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not | stdout | identifier_name |
chmod.rs | ([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn stdout(data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1)
.validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not stat '{}': {}",
file.display(),
err
)));
}
return 1;
}
};
match options.fmode {
Some(mode) => change_file(options, msgs, fperm, mode, file),
None => | {
let cmode_unwrapped = options.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
change_file(options, msgs, fperm, mode, file);
fperm = mode;
}
Err(f) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!("failed to parse mode: {}", f)));
}
return 1; | conditional_block |
|
cnn.py | 50, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 448
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
if add_jpeg_layer:
if train_quant_only and not train_cnn_only:
model_ft.load_state_dict(torch.load("model.final"))
# if loadfull:
# model_ft.load_state_dict(torch.load("model.final"))
# model_ft = model_ft[1]
model_ft = nn.Sequential(JpegLayer( \
rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
model_ft)
set_parameter_requires_grad(model_ft,\
False, feature_extract,
train_quant_only, train_cnn_only)
# model_ft.load_state_dict(torch.load("model.fail"))
return model_ft, input_size
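# Note: with add_jpeg_layer=True the returned model is an nn.Sequential whose first
# module is the JpegLayer and whose second is the CNN, which is why parameter names
# elsewhere in this script carry the "0." prefix (e.g. "0.quantize").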
# Initialize the model for this run
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print('\t',name)
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
# Observe that all parameters are being optimized
#if train_quant_only:
#optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
#else:
optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
optimizer_ft = None
# optim.SGD([{'params': params_to_update},\
# {'params': params_quantize, 'lr': 0.005, 'momentum':0.9}], lr=0.0005, momentum=0.9)
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to setup the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
if args.cnn_only == True and iftrain:
torch.save(model_ft.state_dict(), "model.final")
#print the trained quantization matrix
if args.qtable:
print('--------- the trained quantize table ---------')
for name,param in model_ft.named_parameters():
if param.requi | res_grad == True and\
name == "0.quantize":
print('Y',param.data[0]*255)
print('Cb',param.data[1]*255)
print('Cr',param.data[2]*255)
break
# Let's v | conditional_block |
|
cnn.py | As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accommodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
si | running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
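# Interpretation: the penalty is factor / mean|q| (scaled by batch size), so
# minimizing it pushes the learned quantization table toward larger values,
# i.e. coarser quantization.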
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
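# For example (illustrative): the first call with feature_extract=True freezes every
# parameter; a later call with quant_only=True freezes everything except parameters
# whose name contains "quantize" (the learned quantization table).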
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# times an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer and by default the new parameters have ``.requires_grad=True``
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .requires_grad’s set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier | nce = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases = ['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0 | identifier_body |
cnn.py | input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accommodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def tr | odel, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases = ['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
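# A sketch of how the function above is typically invoked (illustrative only;
# names such as ``dataloaders_dict``, ``optimizer_ft``, ``num_epochs`` and
# ``model_name`` are assumed to be defined elsewhere in the script):
#
# ::
#
#     criterion = nn.CrossEntropyLoss()
#     model, hist = train_model(model, dataloaders_dict, criterion, optimizer_ft,
#                               num_epochs=num_epochs,
#                               is_inception=(model_name == "inception"))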
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall that the final layer of a CNN model, which is often
# an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer, its new parameters have ``.requires_grad=True`` by default,
# so only the new layer's parameters will be updated. When we are
# finetuning we can leave all of the ``.requires_grad`` attributes set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# ( | ain_model(m | identifier_name |
cnn.py | == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall that the final layer of a CNN model, which is often
# an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer, its new parameters have ``.requires_grad=True`` by default,
# so only the new layer's parameters will be updated. When we are
# finetuning we can leave all of the ``.requires_grad`` attributes set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalization layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
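# For both Alexnet and VGG the replacement can be sketched as follows
# (illustrative only, using the stock torchvision constructors):
#
# ::
#
#     from torchvision import models
#     import torch.nn as nn
#
#     model = models.vgg11_bn(pretrained=True)     # or models.alexnet(pretrained=True)
#     num_ftrs = model.classifier[6].in_features   # 4096
#     model.classifier[6] = nn.Linear(num_ftrs, num_classes)
#     input_size = 224
#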
# Squeezenet
# ~~~~~~~~~~
#
# The Squeezenet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet; we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5) | # (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1)) | random_line_split |
|
Server.go | ")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
copySlice := buf[i : i+pLength] // copy value, hope this works :)
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong, e.g. the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Incoming connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something went wrong, e.g. the connection was lost
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) { | )
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: must be reply TCP message with approve connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP("udp | for {
var (
buf = make([]byte, 1024)
PlayerID ID | random_line_split |
Server.go | c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
copySlice := buf[i : i+pLength] // copy value, hope this works :)
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong, e.g. the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Incoming connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something went wrong, e.g. the connection was lost
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients |
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP | {
if PlayerID == c.ID { // TODO: must be reply TCP message with approve connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
} | conditional_block |
Server.go | () {
defer c.OnPanic()
log.Println("Incoming connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
copySlice := buf[i : i+pLength] // copy value, hope this works :)
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong, e.g. the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Incoming connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something went wrong, e.g. the connection was lost
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: must be reply TCP message with approve connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return | Run | identifier_name |
|
Server.go | case io.EOF:
return
case nil:
{
var i uint16 = 0
for {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
copySlice := buf[i : i+pLength] // copy value, hope this works :)
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong, e.g. the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Incoming connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something went wrong, e.g. the connection was lost
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: must be reply TCP message with approve connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
| {
defer c.OnPanic()
log.Println("Incoming connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
| identifier_body |
|
plugins.go | string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) Unload() error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, _ := os.Open("config/reply.json")
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
err := decoder.Decode(&replyTerms)
if err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) |
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) | {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
} | identifier_body |
plugins.go | []string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) | () error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, _ := os.Open("config/reply.json")
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
err := decoder.Decode(&replyTerms)
if err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite | Unload | identifier_name |
plugins.go | "blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) Unload() error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, _ := os.Open("config/reply.json")
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
err := decoder.Decode(&replyTerms)
if err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel) // the map is keyed by channel, not by the kicked nick
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) { | RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))] | random_line_split |
|
plugins.go | .invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
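// As an illustrative (hypothetical) use of textCmd, a plugin could register a
// canned-response command during Load; the command name and texts here are
// made up:
//
//	p.textCmd("cmd.hello", []string{"hi!", "o/"})
//
// The handler then replies with a random entry, prefixed with the first
// argument as a nick ("bob: hi!") when one is given.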
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel)
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))]
cmd = RedditSearch.Commands[0]
return p.bot.Event("cmd." + cmd, source, target, cmd, args)
}
func checkIsImage(post *reddit.Post, b *bot.Bot) bool {
linkURL, err := url.Parse(post.URL)
if err != nil {
return false
}
for _, host := range b.Config.ImageHosts {
if strings.Contains(linkURL.Host, host) {
return true
}
}
return false
}
func chooseRandStr(opt []string) string {
return opt[rand.Intn(len(opt))]
}
func (m *RedditSearch) get() *reddit.Post {
for i := 0; i < 5; i++ {
m.mu.Lock()
var post *reddit.Post
if len(m.posts) > 0 {
post = m.posts[len(m.posts) - 1]
m.posts = m.posts[:len(m.posts) - 1]
}
m.mu.Unlock()
if post != nil {
return post
}
select {
case <-m.close:
return nil
case <-time.After(time.Second):
}
}
return nil
}
func (m *RedditSearch) register(plug *RedditParser) {
m.posts = make([]*reddit.Post, 0, plug.PreloadCount)
m.close = plug.close
go func() {
if len(m.RedditListTag) > 0 {
m.getSubredditList()
}
if len(m.Subreddits) == 0 {
return
}
m.preload(plug.Lurker, plug.bot)
}()
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
what := chooseRandStr(m.What)
post := m.get()
if post == nil | {
plug.bot.Message(bot.PrivMsg(target, fmt.Sprintf("%s: haven't indexed any %s yet", source.Name, what)))
return true, nil
} | conditional_block |
|
partitions.go | return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns if the existing partition should be resized by evaluating if
// `resize` field is true and partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
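// For convertMiBToSectors: with a 512-byte logical sector size, 100 MiB becomes
// 100 * 1024 * 1024 / 512 = 204800 sectors, and a nil input stays nil so an
// unspecified value propagates through to the sgdisk call unchanged.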
// getRealStartAndSize returns the specified partitions with their real start and size (in sectors)
// resolved. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func | (sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
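// Sketch of the input parseSgdiskPretend expects (the sector numbers are made
// up): each --info block contains lines like
//
//	First sector: 2048 (at 1024.0 KiB)
//	Last sector: 1050623 (at 513.0 MiB)
//
// so for partitionNumbers = []int{1} the result would be
// map[int]sgdiskOutput{1: {start: 2048, size: 1048576}}, size being
// 1 + last - first.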
// partitionShouldExist returns whether the partition should exist, based on its ShouldExist field.
// nil (unspecified in json) is treated the same | parseSgdiskPretend | identifier_name |
partitions.go | did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns the specified partitions with their real start and size (in sectors)
// resolved. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether the partition should exist, based on its ShouldExist field.
// nil (unspecified in json) is treated the same as true.
func partitionShouldExist(part sgdisk.Partition) bool {
return !cutil.IsFalse(part.ShouldExist)
}
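// In other words, for partitionShouldExist: ShouldExist nil -> true,
// true -> true, false -> false.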
// getPartitionMap returns a map of partitions on device, indexed by partition number
func (s stage) getPartitionMap(device string) (util.DiskInfo, error) {
info := util.DiskInfo{}
err := s.Logger.LogOp(
func() error {
var err error
info, err = util.DumpDisk(device)
return err
}, "reading partition table of %q", device)
if err != nil {
return util.DiskInfo{}, err
}
return info, nil
}
// Allow sorting partitions (must be a stable sort) so partition number 0 happens last
// regardless of where it was in the list.
type PartitionList []types.Partition
func (p PartitionList) Len() int {
return len(p)
}
// We only care about partitions with number 0 being considered the "largest" elements
// so they are processed last.
func (p PartitionList) Less(i, j int) bool {
return p[i].Number != 0 && p[j].Number == 0
}
func (p PartitionList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
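// For example (illustrative numbers), a PartitionList numbered [0, 2, 5, 0]
// sorts to [2, 5, 0, 0]: explicitly numbered partitions keep their relative
// order and the auto-numbered (number 0) entries sink to the end.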
// partitionDisk partitions devAlias according to the spec given by dev
func (s stage) partitionDisk(dev types.Disk, devAlias string) error {
if cutil.IsTrue(dev.WipeTable) {
op := sgdisk.Begin(s.Logger, devAlias)
s.Logger.Info("wiping partition table requested on %q", devAlias)
op.WipeTable(true)
if err := op.Commit(); err != nil {
return err
}
}
// Ensure all partitions with number 0 are last
sort.Stable(PartitionList(dev.Partitions))
op := sgdisk.Begin(s.Logger, devAlias)
diskInfo, err := s.getPartitionMap(devAlias)
if err != nil {
return err
}
// get a list of partitions that have size and start 0 replaced with the real sizes
// that would be used if all specified partitions were to be created anew.
// Also calculate sectors for all of the start/size values. | resolvedPartitions, err := s.getRealStartAndSize(dev, devAlias, diskInfo)
if err != nil {
return err | random_line_split |
|
partitions.go | return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns if the existing partition should be resized by evaluating if
// `resize`field is true and partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns the specified partitions with their real start and size (in sectors)
// resolved. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
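// For instance, parseLine(startRegex, "First sector: 2048 (at 1024.0 KiB)")
// yields (2048, nil), while a non-matching line such as "Creating new GPT
// entries." yields (-1, nil). The example lines are illustrative only.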
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil |
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether the partition should exist, based on its ShouldExist field.
// nil (unspecified in json) is treated the | {
return nil, err
} | conditional_block |
partitions.go | return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns if the existing partition should be resized by evaluating if
// `resize` field is true and partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool |
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns the specified partitions with their real start and size (in sectors)
// resolved. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether the partition should exist, based on its ShouldExist field.
// nil (unspecified in json) is treated the | {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
} | identifier_body |
file.rs | 32-bit emb.proc",
77 => "Infineon Technologies 32-bit emb.proc",
78 => "Element 14 64-bit DSP Processor",
79 => "LSI Logic 16-bit DSP Processor",
80 => "Donald Knuth's educational 64-bit proc",
81 => "Harvard University machine-independent object files",
82 => "SiTera Prism",
83 => "Atmel AVR 8-bit microcontroller",
84 => "Fujitsu FR30",
85 => "Mitsubishi D10V",
86 => "Mitsubishi D30V",
87 => "NEC v850",
88 => "Mitsubishi M32R",
89 => "Matsushita MN10300",
90 => "Matsushita MN10200",
91 => "picoJava",
92 => "OpenRISC 32-bit embedded processor",
93 => "ARC International ARCompact",
94 => "Tensilica Xtensa Architecture",
95 => "Alphamosaic VideoCore",
96 => "Thompson Multimedia General Purpose Proc",
97 => "National Semi. 32000",
98 => "Tenor Network TPC",
99 => "Trebia SNP 1000",
100 => "STMicroelectronics ST200",
101 => "Ubicom IP2xxx",
102 => "MAX processor",
103 => "National Semi. CompactRISC",
104 => "Fujitsu F2MC16",
105 => "Texas Instruments msp430",
106 => "Analog Devices Blackfin DSP",
107 => "Seiko Epson S1C33 family",
108 => "Sharp embedded microprocessor",
109 => "Arca RISC",
110 => "PKU-Unity & MPRC Peking Uni. mc series",
111 => "eXcess configurable cpu",
112 => "Icera Semi. Deep Execution Processor",
113 => "Altera Nios II",
114 => "National Semi. CompactRISC CRX",
115 => "Motorola XGATE",
116 => "Infineon C16x/XC16x",
117 => "Renesas M16C",
118 => "Microchip Technology dsPIC30F",
119 => "Freescale Communication Engine RISC",
120 => "Renesas M32C",
131 => "Altium TSK3000",
132 => "Freescale RS08",
133 => "Analog Devices SHARC family",
134 => "Cyan Technology eCOG2",
135 => "Sunplus S+core7 RISC",
136 => "New Japan Radio (NJR) 24-bit DSP",
137 => "Broadcom VideoCore III",
138 => "RISC for Lattice FPGA",
139 => "Seiko Epson C17",
140 => "Texas Instruments TMS320C6000 DSP",
141 => "Texas Instruments TMS320C2000 DSP",
142 => "Texas Instruments TMS320C55x DSP",
143 => "Texas Instruments App. Specific RISC",
144 => "Texas Instruments Prog. Realtime Unit",
160 => "STMicroelectronics 64bit VLIW DSP",
161 => "Cypress M8C",
162 => "Renesas R32C",
163 => "NXP Semi. TriMedia",
164 => "QUALCOMM DSP6",
165 => "Intel 8051 and variants",
166 => "STMicroelectronics STxP7x",
167 => "Andes Tech. compact code emb. RISC",
168 => "Cyan Technology eCOG1X",
169 => "Dallas Semi. MAXQ30 mc",
170 => "New Japan Radio (NJR) 16-bit DSP",
171 => "M2000 Reconfigurable RISC",
172 => "Cray NV2 vector architecture",
173 => "Renesas RX",
174 => "Imagination Tech. META",
175 => "MCST Elbrus",
176 => "Cyan Technology eCOG16",
177 => "National Semi. CompactRISC CR16",
178 => "Freescale Extended Time Processing Unit",
179 => "Infineon Tech. SLE9X",
180 => "Intel L10M",
181 => "Intel K10M",
182 => "reserved 182",
183 => "ARM AARCH64",
184 => "reserved 184",
185 => "Amtel 32-bit microprocessor",
186 => "STMicroelectronics STM8",
187 => "Tilera TILE64",
188 => "Tilera TILEPro",
189 => "Xilinx MicroBlaze",
190 => "NVIDIA CUDA",
191 => "Tilera TILE-Gx",
192 => "CloudShield",
193 => "KIPO-KAIST Core-A 1st gen.",
194 => "KIPO-KAIST Core-A 2nd gen.",
195 => "Synopsys ARCompact V2",
196 => "Open8 RISC",
197 => "Renesas RL78",
198 => "Broadcom VideoCore V",
199 => "Renesas 78KOR",
200 => "Freescale 56800EX DSC",
201 => "Beyond BA1",
202 => "Beyond BA2",
203 => "XMOS xCORE",
204 => "Microchip 8-bit PIC(r)",
210 => "KM211 KM32",
211 => "KM211 KMX32",
212 => "KM211 KMX16",
213 => "KM211 KMX8",
214 => "KM211 KVARC",
215 => "Paneve CDP",
216 => "Cognitive Smart Memory Processor",
217 => "Bluechip CoolEngine",
218 => "Nanoradio Optimized RISC",
219 => "CSR Kalimba",
220 => "Zilog Z80",
221 => "Controls and Data Services VISIUMcore",
222 => "FTDI Chip FT32",
223 => "Moxie processor",
224 => "AMD GPU",
243 => "RISC-V",
247 => "Linux BPF -- in-kernel virtual machine",
_ => "Unknown",
}
} | pub enum FileClass {
// Invalid class
None,
// 32-bit objects
ElfClass32,
// 64 bit objects
ElfClass64,
// Unknown class
Invalid(u8),
}
#[derive(Debug)]
pub enum Encoding {
// Invalid data encoding
None,
// 2's complement, little endian
LittleEndian,
// 2's complement big endian
BigEndian,
// Unknown data encoding
Invalid(u8),
}
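// Per the ELF specification, the EI_DATA byte of e_ident selects the variant:
// 0 = None, 1 = LittleEndian (ELFDATA2LSB), 2 = BigEndian (ELFDATA2MSB); any
// other value is presumably carried through as Invalid(byte) by the (not
// shown) conversion code.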
#[derive(Debug)]
pub enum OsAbi {
// UNIX System V ABI
UnixVSystem,
// HP-UX
HpUx,
// NetBSD
NetBsd,
// Object uses GNU ELF extensions
GnuElfExtensions,
// SUN Solaris
SunSolaris,
// IBM AIX
IbmAix,
// SGI Irix
SgiIrix,
// FreeBSD
FreeBsd,
// Compaq TRU64 UNIX
CompaqTru64Unix,
// Novell Modesto
NovellModesto,
// OpenBSD
OpenBsd,
// ARM EABI
ArmEabi | #[derive(Debug)] | random_line_split |
file.rs | 32-bit emb.proc",
77 => "Infineon Technologies 32-bit emb.proc",
78 => "Element 14 64-bit DSP Processor",
79 => "LSI Logic 16-bit DSP Processor",
80 => "Donald Knuth's educational 64-bit proc",
81 => "Harvard University machine-independent object files",
82 => "SiTera Prism",
83 => "Atmel AVR 8-bit microcontroller",
84 => "Fujitsu FR30",
85 => "Mitsubishi D10V",
86 => "Mitsubishi D30V",
87 => "NEC v850",
88 => "Mitsubishi M32R",
89 => "Matsushita MN10300",
90 => "Matsushita MN10200",
91 => "picoJava",
92 => "OpenRISC 32-bit embedded processor",
93 => "ARC International ARCompact",
94 => "Tensilica Xtensa Architecture",
95 => "Alphamosaic VideoCore",
96 => "Thompson Multimedia General Purpose Proc",
97 => "National Semi. 32000",
98 => "Tenor Network TPC",
99 => "Trebia SNP 1000",
100 => "STMicroelectronics ST200",
101 => "Ubicom IP2xxx",
102 => "MAX processor",
103 => "National Semi. CompactRISC",
104 => "Fujitsu F2MC16",
105 => "Texas Instruments msp430",
106 => "Analog Devices Blackfin DSP",
107 => "Seiko Epson S1C33 family",
108 => "Sharp embedded microprocessor",
109 => "Arca RISC",
110 => "PKU-Unity & MPRC Peking Uni. mc series",
111 => "eXcess configurable cpu",
112 => "Icera Semi. Deep Execution Processor",
113 => "Altera Nios II",
114 => "National Semi. CompactRISC CRX",
115 => "Motorola XGATE",
116 => "Infineon C16x/XC16x",
117 => "Renesas M16C",
118 => "Microchip Technology dsPIC30F",
119 => "Freescale Communication Engine RISC",
120 => "Renesas M32C",
131 => "Altium TSK3000",
132 => "Freescale RS08",
133 => "Analog Devices SHARC family",
134 => "Cyan Technology eCOG2",
135 => "Sunplus S+core7 RISC",
136 => "New Japan Radio (NJR) 24-bit DSP",
137 => "Broadcom VideoCore III",
138 => "RISC for Lattice FPGA",
139 => "Seiko Epson C17",
140 => "Texas Instruments TMS320C6000 DSP",
141 => "Texas Instruments TMS320C2000 DSP",
142 => "Texas Instruments TMS320C55x DSP",
143 => "Texas Instruments App. Specific RISC",
144 => "Texas Instruments Prog. Realtime Unit",
160 => "STMicroelectronics 64bit VLIW DSP",
161 => "Cypress M8C",
162 => "Renesas R32C",
163 => "NXP Semi. TriMedia",
164 => "QUALCOMM DSP6",
165 => "Intel 8051 and variants",
166 => "STMicroelectronics STxP7x",
167 => "Andes Tech. compact code emb. RISC",
168 => "Cyan Technology eCOG1X",
169 => "Dallas Semi. MAXQ30 mc",
170 => "New Japan Radio (NJR) 16-bit DSP",
171 => "M2000 Reconfigurable RISC",
172 => "Cray NV2 vector architecture",
173 => "Renesas RX",
174 => "Imagination Tech. META",
175 => "MCST Elbrus",
176 => "Cyan Technology eCOG16",
177 => "National Semi. CompactRISC CR16",
178 => "Freescale Extended Time Processing Unit",
179 => "Infineon Tech. SLE9X",
180 => "Intel L10M",
181 => "Intel K10M",
182 => "reserved 182",
183 => "ARM AARCH64",
184 => "reserved 184",
185 => "Amtel 32-bit microprocessor",
186 => "STMicroelectronics STM8",
187 => "Tilera TILE64",
188 => "Tilera TILEPro",
189 => "Xilinx MicroBlaze",
190 => "NVIDIA CUDA",
191 => "Tilera TILE-Gx",
192 => "CloudShield",
193 => "KIPO-KAIST Core-A 1st gen.",
194 => "KIPO-KAIST Core-A 2nd gen.",
195 => "Synopsys ARCompact V2",
196 => "Open8 RISC",
197 => "Renesas RL78",
198 => "Broadcom VideoCore V",
199 => "Renesas 78KOR",
200 => "Freescale 56800EX DSC",
201 => "Beyond BA1",
202 => "Beyond BA2",
203 => "XMOS xCORE",
204 => "Microchip 8-bit PIC(r)",
210 => "KM211 KM32",
211 => "KM211 KMX32",
212 => "KM211 KMX16",
213 => "KM211 KMX8",
214 => "KM211 KVARC",
215 => "Paneve CDP",
216 => "Cognitive Smart Memory Processor",
217 => "Bluechip CoolEngine",
218 => "Nanoradio Optimized RISC",
219 => "CSR Kalimba",
220 => "Zilog Z80",
221 => "Controls and Data Services VISIUMcore",
222 => "FTDI Chip FT32",
223 => "Moxie processor",
224 => "AMD GPU",
243 => "RISC-V",
247 => "Linux BPF -- in-kernel virtual machine",
_ => "Unknown",
}
}
#[derive(Debug)]
pub enum FileClass {
// Invalid class
None,
// 32-bit objects
ElfClass32,
// 64 bit objects
ElfClass64,
// Unknown class
Invalid(u8),
}
#[derive(Debug)]
pub enum | {
// Invalid data encoding
None,
// 2's complement, little endian
LittleEndian,
// 2's complement big endian
BigEndian,
// Unknown data encoding
Invalid(u8),
}
#[derive(Debug)]
pub enum OsAbi {
// UNIX System V ABI
UnixVSystem,
// HP-UX
HpUx,
// NetBSD
NetBsd,
// Object uses GNU ELF extensions
GnuElfExtensions,
// SUN Solaris
SunSolaris,
// IBM AIX
IbmAix,
// SGI Irix
SgiIrix,
// FreeBSD
FreeBsd,
// Compaq TRU64 UNIX
CompaqTru64Unix,
// Novell Modesto
NovellModesto,
// OpenBSD
OpenBsd,
// ARM EABI
ArmE | Encoding | identifier_name |
mod.rs | strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
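// Every gzip stream begins with the magic bytes 0x1f 0x8b (RFC 1952), so in
// Mode::Unzip do_write peeks at up to two input bytes to decide between
// Mode::Gunzip (gzip-wrapped data) and Mode::Inflate (raw zlib) before
// inflating.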
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We are not done yet, but the output buffer is full
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
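// Sketch of the expected call order from the binding layer (the op names are
// the ones defined in this file; the exact sequencing is an assumption):
// op_zlib_new(mode) -> handle, an init step that ends up in init_stream,
// repeated op_zlib_write / op_zlib_write_async calls, and finally
// op_zlib_close(handle), which defers the close if a write is still in flight.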
#[op]
pub fn | (state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = | op_zlib_close | identifier_name |
mod.rs | : StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We are not done yet, but the output buffer is full
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => |
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = | {} | conditional_block |
mod.rs | : StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're are not done yet, but output buffer is full
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> |
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = | {
self.err = self.strm.reset(self.mode);
Ok(())
} | identifier_body |
mod.rs | strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're are not done yet, but output buffer is full
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress { | self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = zlib | self.pending_close = true;
return Ok(false);
}
| random_line_split |
seq2seq_chatbot_Learning.py | 사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break;
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence.extend([vocabulary[OOV_INDEX]])
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
return input_seq
# 텍스트 생성
def generate_text(input_seq):
# 입력을 인코더에 넣어 마지막 상태 구함
states = encoder_model.predict(input_seq)
# 목표 시퀀스 초기화
target_seq = np.zeros((1, 1))
# 목표 시퀀스의 첫 번째에 <START> 태그 추가
target_seq[0, 0] = STA_INDEX
# 인덱스 초기화
indexs = []
# 디코더 타임 스텝 반복
while 1:
# 디코더로 현재 타임 스텝 출력 구함
# 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화
decoder_outputs, state_h, state_c = decoder_model.pr | edict(
| conditional_block |
|
seq2seq_chatbot_Learning.py | 의 출력을 띄어쓰기로 구분하여 붙임
| words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
#
중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break;
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence.extend([vocabulary[OOV_INDEX]])
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
| sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split(): | identifier_body |
seq2seq_chatbot_Learning.py | 석의 출력을 띄어쓰기로 구분하여 붙임
sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split():
words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
# 중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim, | dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break;
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence.extend([vocabulary[OOV_INDEX]])
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
# | random_line_split |
|
seq2seq_chatbot_Learning.py | 0]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break;
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence.extend([vocabulary[OOV_INDEX]])
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
|
return input | identifier_name |
|
mod.rs | Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s)
/// as
/// a string argument, parses them, and executes them the same as it would if you had
/// executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is
/// not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items[0].job.args, self))
} else if let Some(Value::Function(function)) =
self.variables.get(&pipeline.items[0].job.args[0]).cloned()
{
function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
} else {
self.execute_pipeline(pipeline).map_err(Into::into)
}?;
if let Some(ref callback) = self.on_command {
if let Ok(elapsed_time) = command_start_time.elapsed() {
callback(self, elapsed_time);
}
}
if self.opts.err_exit && !exit_status.is_success() {
return Err(PipelineError::EarlyExit(exit_status).into());
}
Ok(exit_status)
}
/// Get the pid of the last executed job
#[must_use]
pub const fn previous_job(&self) -> Option<usize> {
if self.previous_job == !0 {
None
} else {
Some(self.previous_job)
}
}
/// Set the callback to call before each command
pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
self.background_event = callback;
}
/// Set the callback to call before each command
#[must_use]
pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
&mut self.background_event
}
/// Set the callback to call before each command
pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
self.pre_command = callback;
}
/// Set the callback to call before each command
#[must_use]
pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> {
&mut self.pre_command
}
/// Set the callback to call on each command
pub fn set_on_command(&mut self, callback: Option<OnCommandCallback<'a>>) {
self.on_command = callback;
}
/// Set the callback to call on each command
pub fn on_command_mut(&mut self) -> &mut Option<OnCommandCallback<'a>> { &mut self.on_command }
/// Get access to the builtins
#[must_use]
pub const fn builtins(&self) -> &BuiltinMap<'a> { &self.builtins }
/// Get a mutable access to the builtins
///
/// Warning: Previously defined functions will rely on previous versions of the builtins, even
/// if they are redefined. It is strongly advised to avoid mutating the builtins while the shell
/// is running
#[must_use]
pub fn | builtins_mut | identifier_name |
|
mod.rs | fg` command is run, this will be used to communicate with the specified
/// background process.
foreground_signals: Arc<foreground::Signals>,
// Callbacks
/// Custom callback for each command call
on_command: Option<OnCommandCallback<'a>>,
/// Custom callback before each command call
pre_command: Option<PreCommandCallback<'a>>,
/// Custom callback when a background event occurs
background_event: Option<BackgroundEventCallback>,
// Default std pipes
stdin: Option<File>,
stdout: Option<File>,
stderr: Option<File>,
}
/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s)
/// as
/// a string argument, parses them, and executes them the same as it would if you had
/// executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is
/// not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items[0].job.args, self))
} else if let Some(Value::Function(function)) =
self.variables.get(&pipeline.items[0].job.args[0]).cloned()
{
function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
} else {
self.execute_pipeline(pipeline).map_err(Into::into)
}?;
if let Some(ref callback) = self.on_command {
if let Ok(elapsed_time) = command_start_time.elapsed() {
callback(self, elapsed_time);
}
}
if self.opts.err_exit && !exit_status.is_success() {
return Err(PipelineError::EarlyExit(exit_status).into());
}
Ok(exit_status)
}
/// Get the pid of the last executed job
#[must_use]
pub const fn previous_job(&self) -> Option<usize> {
if self.previous_job == !0 {
None
} else {
Some(self.previous_job)
}
}
/// Set the callback to call before each command
pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
self.background_event = callback;
}
/// Set the callback to call before each command
#[must_use]
pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
&mut self.background_event
}
/// Set the callback to call before each command
pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
self.pre_command = callback;
}
/// Set the callback to call before each command
#[must_use]
pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> | {
&mut self.pre_command
} | identifier_body |
|
mod.rs | Error<Self>> for IonError {
#[must_use]
fn from(cause: ExpansionError<Self>) -> Self { Self::ExpansionError(cause) }
}
/// Options for the shell
#[derive(Debug, Clone, Hash, Default)]
pub struct Options {
/// Exit from the shell on the first error.
pub err_exit: bool,
/// Activates the -p option, aka pipefail in bash
pub pipe_fail: bool,
/// Do not execute any commands given to the shell.
pub no_exec: bool,
/// If set, denotes that this shell is running as a background job.
pub grab_tty: bool,
}
/// The shell structure is a megastructure that manages all of the state of the shell throughout
/// the entirety of the
/// program. It is initialized at the beginning of the program, and lives until the end of the
/// program.
pub struct Shell<'a> {
/// Contains a list of built-in commands that were created when the program | flow_control: Block,
/// Contains the directory stack parameters.
directory_stack: DirectoryStack,
/// When a command is executed, the final result of that command is stored
/// here.
previous_status: Status,
/// The job ID of the previous command sent to the background.
previous_job: usize,
/// Contains all the options relative to the shell
opts: Options,
/// Contains information on all of the active background processes that are being managed
/// by the shell.
background: Arc<Mutex<Vec<BackgroundProcess>>>,
/// When the `fg` command is run, this will be used to communicate with the specified
/// background process.
foreground_signals: Arc<foreground::Signals>,
// Callbacks
/// Custom callback for each command call
on_command: Option<OnCommandCallback<'a>>,
/// Custom callback before each command call
pre_command: Option<PreCommandCallback<'a>>,
/// Custom callback when a background event occurs
background_event: Option<BackgroundEventCallback>,
// Default std pipes
stdin: Option<File>,
stdout: Option<File>,
stderr: Option<File>,
}
/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s)
/// as
/// a string argument, parses them, and executes them the same as it would if you had
/// executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is
/// not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items | /// started.
builtins: BuiltinMap<'a>,
/// Contains the aliases, strings, and array variable maps.
variables: Variables,
/// Contains the current state of flow control parameters. | random_line_split |
lib.rs | ::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice[..version.len()].clone_from_slice(version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
// ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?;
*private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes);
}
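// Enclave entry point: build an SGX report whose report_data carries the caller's
// challenge plus the SHA-256 of a freshly generated CSR, then copy that CSR out
// into the caller-supplied buffer.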
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
// place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
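// Enclave entry point: cache the (enclave certificate, root certificate) pair
// received from the untrusted side; an already-populated CERT_CHAIN is treated
// as an unhandled error (panic).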
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
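// Enclave entry point: first half of the local-attestation handshake. Processes
// the peer's msg1, produces msg2, and returns a fresh session id to be used by
// finish_local_attest_enc.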
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
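// Hard-coded byte ranges of the CertificationRequestInfo body and the embedded EC
// public key inside the DER-encoded CSR. These offsets assume the fixed layout
// produced by csr::generate_csr with ROOT_ENCLAVE_CSR_TEMPLATE.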
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave { |
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh_msg3.is_none());
let dh_msg3 = match dh_msg3 {
Some(msg) => msg,
None => {
return SgxRootEnclave::Msg3RawError;
}
};
let mut initiator = {
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
initiator_hash.remove(&sgx_root_enclave_session_id).unwrap()
}; | identifier_body |
|
lib.rs | = 0x06,
PKCS8Error = 0x07,
StateError = 0x08,
PrivateKeyNotPopulated = 0x09,
}
#[no_mangle]
pub extern "C" fn g | p_fwv_len: &mut usize) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
*p_fwv_len = version.len();
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice[..version.len()].clone_from_slice(version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
// ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?;
*private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes);
}
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
// place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave {
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh | et_firmware_version_len( | identifier_name |
lib.rs | = 0x06,
PKCS8Error = 0x07,
StateError = 0x08,
PrivateKeyNotPopulated = 0x09,
}
#[no_mangle]
pub extern "C" fn get_firmware_version_len(p_fwv_len: &mut usize) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
*p_fwv_len = version.len();
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice[..version.len()].clone_from_slice(version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
// ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?; | }
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
// place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave {
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh_msg | *private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes); | random_line_split |
config.pb.go | () ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *LinkTemplate) GetOptions() []*LinkOptionsTemplate {
if m != nil {
return m.Options
}
return nil
}
// A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil
}
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil {
return m.Dashboards
}
return nil
}
type DefaultConfiguration struct {
// A default testgroup with default initialization data
DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup" yaml:"default_test_group,omitempty"`
// A default dashboard with default initialization data
DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab" yaml:"default_dashboard_tab,omitempty"`
}
func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} }
func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) }
func (*DefaultConfiguration) ProtoMessage() {}
func (*DefaultConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup {
if m != nil {
return m.DefaultTestGroup
}
return nil
}
func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab {
if m != nil {
return m.DefaultDashboardTab
}
return nil
}
func init() {
proto.RegisterType((*TestGroup)(nil), "TestGroup")
proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader")
proto.RegisterType((*Dashboard)(nil), "Dashboard")
proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate")
proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate")
proto.RegisterType((*DashboardTab)(nil), "DashboardTab")
proto.RegisterType((*Configuration)(nil), "Configuration")
proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration")
proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value)
}
func init() { proto.RegisterFile("config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x54, 0x5d, 0x8f, 0xd2, 0x40,
0x14, 0xb5, 0xb0, 0x5f, 0x5c, 0xca, 0x6e, 0x19, 0x40, 0xab, 0x89, 0x11, 0x6b, 0xb2, 0x21, 0x9a,
0x60, 0x82, 0x2f, 0x1a, 0xdd, 0xac, 0x08, 0xb8, 0x6e, 0xdc, 0x65, 0x49, 0x41, 0x5f, 0x27, 0x43,
0x19, 0xa0, 0xa1, 0xb4, 0x4d, 0x67, 0x6a, 0xe0, 0x77, 0xf8, 0x13, 0xfd, 0x1b, 0x3e, 0x98, 0x4e,
0xbf, 0xa6, 0x86, 0xb7, 0x99, 0x73, 0xcf, 0xbd, 0x77, 0xe6, 0xcc, 0xb9, 0x03, 0xaa, 0xe5, 0xb9,
0x4b, 0x7b, 0xd5, 0xf5, 0x03, 0x8f, 0x7b, 0xc6, 0x9f, 0x32, 0x54, 0x66, 0x94, 0xf1, 0x9b, 0xc | Descriptor | identifier_name |
|
config.pb.go |
func (TestGroup_TestsName) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a group of tests to gather.
type TestGroup struct {
// Name of this TestGroup, for mapping dashboard tabs to tests.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// Path to the test result stored in gcs
GcsPrefix string `protobuf:"bytes,2,opt,name=gcs_prefix,json=gcsPrefix" yaml:"gcs_prefix,omitempty"`
// Number of days of test results to gather and serve.
DaysOfResults int32 `protobuf:"varint,3,opt,name=days_of_results,json=daysOfResults" yaml:"days_of_results,omitempty"`
// What to do with the 'Tests name' configuration value. It can replace the
// name of the test, be appended to the name of the test, or ignored. If it is
// ignored, then the name of the tests will be the build target.
TestsNamePolicy TestGroup_TestsName `protobuf:"varint,6,opt,name=tests_name_policy,json=testsNamePolicy,enum=TestGroup_TestsName" yaml:"tests_name_policy,omitempty"`
ColumnHeader []*TestGroup_ColumnHeader `protobuf:"bytes,9,rep,name=column_header,json=columnHeader" yaml:"column_header,omitempty"`
// deprecated - always set to true
UseKubernetesClient bool `protobuf:"varint,24,opt,name=use_kubernetes_client,json=useKubernetesClient" yaml:"use_kubernetes_client,omitempty"`
// deprecated - always set to true
IsExternal bool `protobuf:"varint,25,opt,name=is_external,json=isExternal" yaml:"is_external,omitempty"`
}
func (m *TestGroup) Reset() { *m = TestGroup{} }
func (m *TestGroup) String() string { return proto.CompactTextString(m) }
func (*TestGroup) ProtoMessage() {}
func (*TestGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestGroup) GetColumnHeader() []*TestGroup_ColumnHeader {
if m != nil {
return m.ColumnHeader
}
return nil
}
// Custom column headers for defining extra column-heading rows from values in
// the test result.
type TestGroup_ColumnHeader struct {
ConfigurationValue string `protobuf:"bytes,3,opt,name=configuration_value,json=configurationValue" yaml:"configuration_value,omitempty"`
}
func (m *TestGroup_ColumnHeader) Reset() { *m = TestGroup_ColumnHeader{} }
func (m *TestGroup_ColumnHeader) String() string { return proto.CompactTextString(m) }
func (*TestGroup_ColumnHeader) ProtoMessage() {}
func (*TestGroup_ColumnHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a dashboard.
type Dashboard struct {
// A list of the tabs on the dashboard.
DashboardTab []*DashboardTab `protobuf:"bytes,1,rep,name=dashboard_tab,json=dashboardTab" yaml:"dashboard_tab,omitempty"`
// A name for the Dashboard.
Name string `protobuf:"bytes,2,opt,name=name" yaml:"name,omitempty"`
}
func (m *Dashboard) Reset() { *m = Dashboard{} }
func (m *Dashboard) String() string { return proto.CompactTextString(m) }
func (*Dashboard) ProtoMessage() {}
func (*Dashboard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Dashboard) GetDashboardTab() []*DashboardTab {
if m != nil {
return m.DashboardTab
}
return nil
}
type LinkTemplate struct {
// The URL template.
Url string `protobuf:"bytes,1,opt,name=url" yaml:"url,omitempty"`
// The options templates.
Options []*LinkOptionsTemplate `protobuf:"bytes,2,rep,name=options" yaml:"options,omitempty"`
}
func (m *LinkTemplate) Reset() { *m = LinkTemplate{} }
func (m *LinkTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkTemplate) ProtoMessage() {}
func (*LinkTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *LinkTemplate) GetOptions() []*LinkOptionsTemplate {
if m != nil {
return m.Options
}
return nil
}
// A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil
}
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil {
return m.Dashboards
}
return nil
| {
return proto.EnumName(TestGroup_TestsName_name, int32(x))
} | identifier_body |
|
config.pb.go | }
// A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil | }
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil {
return m.Dashboards
}
return nil
}
type DefaultConfiguration struct {
// A default testgroup with default initialization data
DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup" yaml:"default_test_group,omitempty"`
// A default dashboard with default initialization data
DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab" yaml:"default_dashboard_tab,omitempty"`
}
func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} }
func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) }
func (*DefaultConfiguration) ProtoMessage() {}
func (*DefaultConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup {
if m != nil {
return m.DefaultTestGroup
}
return nil
}
func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab {
if m != nil {
return m.DefaultDashboardTab
}
return nil
}
func init() {
proto.RegisterType((*TestGroup)(nil), "TestGroup")
proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader")
proto.RegisterType((*Dashboard)(nil), "Dashboard")
proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate")
proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate")
proto.RegisterType((*DashboardTab)(nil), "DashboardTab")
proto.RegisterType((*Configuration)(nil), "Configuration")
proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration")
proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value)
}
func init() { proto.RegisterFile("config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x54, 0x5d, 0x8f, 0xd2, 0x40,
0x14, 0xb5, 0xb0, 0x5f, 0x5c, 0xca, 0x6e, 0x19, 0x40, 0xab, 0x89, 0x11, 0x6b, 0xb2, 0x21, 0x9a,
0x60, 0x82, 0x2f, 0x1a, 0xdd, 0xac, 0x08, 0xb8, 0x6e, 0xdc, 0x65, 0x49, 0x41, 0x5f, 0x27, 0x43,
0x19, 0xa0, 0xa1, 0xb4, 0x4d, 0x67, 0x6a, 0xe0, 0x77, 0xf8, 0x13, 0xfd, 0x1b, 0x3e, 0x98, 0x4e,
0xbf, 0xa6, 0x86, 0xb7, 0x99, 0x73, 0xcf, 0xbd, 0x77, 0xe6, 0xcc, 0xb9, 0x03, 0xaa, 0xe5, 0xb9,
0x4b, 0x7b, 0xd5, 0xf5, 0x03, 0x8f, 0x7b, 0xc6, 0x9f, 0x32, 0x54, 0x66, 0x94, 0xf1, 0x9b, 0xc0,
0x0b, 0x7d, 0x84, 0xe0, 0xc8, 0x25, 0x5b, 0xaa, 0x2b | random_line_split |
|
config.pb.go | // A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil
}
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil |
return nil
}
type DefaultConfiguration struct {
// A default testgroup with default initialization data
DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup" yaml:"default_test_group,omitempty"`
// A default dashboard with default initialization data
DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab" yaml:"default_dashboard_tab,omitempty"`
}
func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} }
func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) }
func (*DefaultConfiguration) ProtoMessage() {}
func (*DefaultConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup {
if m != nil {
return m.DefaultTestGroup
}
return nil
}
func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab {
if m != nil {
return m.DefaultDashboardTab
}
return nil
}
func init() {
proto.RegisterType((*TestGroup)(nil), "TestGroup")
proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader")
proto.RegisterType((*Dashboard)(nil), "Dashboard")
proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate")
proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate")
proto.RegisterType((*DashboardTab)(nil), "DashboardTab")
proto.RegisterType((*Configuration)(nil), "Configuration")
proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration")
proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value)
}
func init() { proto.RegisterFile("config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x54, 0x5d, 0x8f, 0xd2, 0x40,
0x14, 0xb5, 0xb0, 0x5f, 0x5c, 0xca, 0x6e, 0x19, 0x40, 0xab, 0x89, 0x11, 0x6b, 0xb2, 0x21, 0x9a,
0x60, 0x82, 0x2f, 0x1a, 0xdd, 0xac, 0x08, 0xb8, 0x6e, 0xdc, 0x65, 0x49, 0x41, 0x5f, 0x27, 0x43,
0x19, 0xa0, 0xa1, 0xb4, 0x4d, 0x67, 0x6a, 0xe0, 0x77, 0xf8, 0x13, 0xfd, 0x1b, 0x3e, 0x98, 0x4e,
0xbf, 0xa6, 0x86, 0xb7, 0x99, 0x73, 0xcf, 0xbd, 0x77, 0xe6, 0xcc, 0xb9, 0x03, 0xaa, 0xe5, 0xb9,
0x4b, 0x7b, 0xd5, 0xf5, 0x03, 0x8f, 0x7b, 0xc6, 0x9f, 0x32, 0x54, 0x66, 0x94, 0xf1, 0x9b, 0xc0,
0x0b, 0x7d, 0x84, 0xe0, 0xc8, 0x25, 0x5b, 0xaa, 0x | {
return m.Dashboards
} | conditional_block |
main.rs | data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
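// Invert four chained aes_dec rounds with a zero round key (the hash's per-chunk
// mixing, assuming ComputeGlyphHash absorbs each chunk with aes_decx4); the
// dec_last/enc_last wrappers account for AES's special last round.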
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
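// Solve for a single 16-byte first block that drives the hash state, seeded with a
// total length of `count` + 16 bytes, to `target_hash` once the block is absorbed.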
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
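// Pull a target hash state backwards through `data`: the short tail block first,
// then every full 16-byte chunk in reverse, yielding the state the hash must hold
// just before `data` is absorbed.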
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
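// Undo the two final mixing rounds around the last (zero-padded) block `suffix`;
// chosen_prefix uses this to solve for the last 16 bytes of a forged message.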
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) |
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
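// The hash folds the message length in by XOR, so short inputs whose first byte
// compensates for the length difference (e.g. "A" vs "B\x00") hash identically.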
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack: recovering a plaintext of up to 15 bytes from its hash");
println!("Note: due to the padding attack, other messages share this hash");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext must be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
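// True iff every lane of the block is an ASCII alphanumeric byte ('0'-'9', 'A'-'Z', 'a'-'z').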
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
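// Worker loop of the forgery search: brute-force an alphanumeric block so that the
// hash of the forged prefix plus `suffix` lands on the all-zero target; FOUND is
// the shared stop flag between workers.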
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
| {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
} | identifier_body |
main.rs | data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn | (prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext must be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
| concat | identifier_name |
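Editor's note: the derivation in the single_prefix comment, written out. With $S$ the seed, $C$ the count word, $B$ the 16-byte block and $\mathrm{dec}^4$ the four-round mix applied per absorbed block (so $\mathrm{dec}^{-4}$ is what inv_aes_decx4 computes), one absorbed block gives

$$H \oplus S = \mathrm{dec}^{4}\!\left(C \oplus S \oplus B\right) \;\Longrightarrow\; B = \mathrm{dec}^{-4}\!\left(H \oplus S\right) \oplus C \oplus S,$$

which is exactly what single_prefix evaluates: the caller passes in $H \oplus S$ as target_hash, and the count used for the forged prefix is the original length plus 16.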
main.rs | u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext must be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
let controlled_bytes = total_len.min(16);
let controlled = &mut controlled[..controlled_bytes];
controlled[ATTACK_BYTES..].copy_from_slice(bsuffix);
let seed = Simd::from_array([
17820195240, 4041143216,
22093178114, 2324176188,
]);
let mut rng = SRng::new(seed * Simd::splat(worker + 1));
let start = std::time::Instant::now();
for ii in 0_u64.. {
if FOUND.load(Ordering::Relaxed) {
return;
}
let prefix = rng.random_alphanum();
controlled[..6].copy_from_slice(&prefix[..6]);
let prefix = {
let prefix_hash = if whole_block {
invert_block(target_hash, controlled)
} else {
preimage_prefix_hash(target_hash, controlled)
};
single_prefix(total_len, prefix_hash)
};
if check_alphanum(prefix) {
FOUND.store(true, Ordering::Relaxed);
let mut buffer = prefix.to_array().to_vec();
buffer.extend_from_slice(&controlled[..6]);
buffer.extend_from_slice(suffix);
let elapsed = start.elapsed();
let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64();
eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs);
eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer));
println!("{}", core::str::from_utf8(&buffer).unwrap());
break;
}
}
}
const MESSAGE: &[&[u8]] = &[
b" Hello Casey! I hope this message finds you well.",
b" Please ignore those 22 random chars to the left for now.",
b" The work you've done on refterm is admirable. There are",
b" not enough performance conscious programmers around, and",
b" we need a demonstration of what is achievable. However,",
b" I would like to address the claim that the hash function",
b" used in refterm is 'cryptographically secure'. There is",
b" a very specific meaning attached to those words, namely:",
b" 1) it is hard to create a message for a given hash value",
b" 2) it is hard to produce two messages with the same hash",
b" If you check, the following strings have the same hash:",
b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,",
b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.",
b" In fact, every line in the message yields the exact same",
b" hash value. That is 0x00000000000000000000000000000000.",
b" I believe this was a clear enough demonstration that the",
b" hash function `ComputeGlyphHash` isn't cryptographically",
b" secure, and that an attacker can corrupt the glyph cache",
b" by printing glyphs with the same hash. The main problem",
b" with this hash function is that all operations consuming",
b" bytes are invertible. Which means an attacker could run",
b" the hash function in reverse, consuming the message from",
b" behind, and calculate the message to get the given hash.",
b" The hash is also weak to a padding attack. For example,", | br#" two strings "A" and "B\x00" yield the same hash, because"#,
b" the padding is constant, so zero byte in the end doens't",
b" matter, and the first byte is `xor`ed with input length.",
b" If you'd like to, you can read this blog post explaining",
b" these attacks in detail and how to avoid them using well", | random_line_split |
|
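Editor's note: the collision claimed above for "A" versus "B\x00" follows from two properties visible in the code: messages are zero-padded to a 16-byte block, and the message length is xored into the first byte of that block (cf. buffer[0] ^= len in invert_attack). A small illustrative check of just that first-block construction (editorial sketch, not the real ComputeGlyphHash):

```python
# Model only the first-block absorption: zero-pad to 16 bytes, then xor the
# length into byte 0. Both messages collapse to the same block.
def first_block(msg: bytes) -> bytes:
    padded = bytearray(msg.ljust(16, b"\x00"))
    padded[0] ^= len(msg)
    return bytes(padded)

# 'A' ^ 1 == 0x40 and 'B' ^ 2 == 0x40, with identical zero padding after.
assert first_block(b"A") == first_block(b"B\x00")
```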
main.rs | data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^-4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 |
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
| {
println!("the plaintext must be shorter than 16 bytes, cannot invert");
return;
} | conditional_block |
constants.go | a while)
AverecmdRetrySleepSeconds = 10
ShellcmdRetryCount = 60 // wait 10 minutes (ex. apt install waiting for lock to release)
ShellcmdRetrySleepSeconds = 10
ClusterAliveRetryCount = 3 // try 3 times to see if the cluster is alive
ClusterAliveRetrySleepSeconds = 5
AverecmdLogFile = "~/averecmd.log"
VServerName = "vserver"
DBUtilYes = "yes"
DBUtilNo = "no"
VfxtKeyPubFile = "~/vfxt_ssh_key_data.pub"
ShellLogFile = "~/shell.log"
// Platform
PlatformAzure = "azure"
// cluster sizes
ClusterSkuUnsupportedTestFast = "unsupported_test_SKU_fast"
ClusterSkuUnsupportedTest = "unsupported_test_SKU"
ClusterSkuProd = "prod_sku"
// cache policies
CachePolicyClientsBypass = "Clients Bypassing the Cluster"
CachePolicyReadCaching = "Read Caching"
CachePolicyReadWriteCaching = "Read and Write Caching"
CachePolicyFullCaching = "Full Caching"
CachePolicyTransitioningClients = "Transitioning Clients Before or After a Migration"
CachePolicyIsolatedCloudWorkstation = "Isolated Cloud Workstation"
CachePolicyCollaboratingCloudWorkstation = "Collaborating Cloud Workstation"
CachePolicyReadOnlyHighVerificationTime = "Read Only High Verification Time"
CachePolicyIsolatedCloudWorkstationCheckAttributes = "{}"
CachePolicyCollaboratingCloudWorkstationCheckAttributes = "{'checkAttrPeriod':30,'checkDirAttrPeriod':30}"
CachePolicyReadOnlyHighVerificationTimeCheckAttributes = "{'checkAttrPeriod':10800,'checkDirAttrPeriod':10800}"
CachePolicyClientsBypassCustomCheckAttributes = "{'checkAttrPeriod':%d}"
QuotaCacheMoveMax = "cfs.quotaCacheMoveMax DN 50" // 50 is the max
QuotaDivisorFloor = "cfs.quotaCacheDivisorFloor CQ %d"
// This setting is used to speed up the number of blocks
// to be assigned to a policy. Decreasing it could reduce
// the impact from an early-added core filer; the default is 20
QuotaMaxMultiplierForInvalidatedMassQuota = "cfs.maxMultiplierForInvalidatedMassQuota VS 2"
QuotaWaitMinutes = 20 // wait up to 20 minutes for the quota to balance
TargetPercentageError = float32(0.01)
QuotaSpeedUpDeleteFirstFiler = true
TerraformAutoMessage = "Customer Added Custom Setting via Terraform"
TerraformOverriddenAutoMessage = "Customer Overridden Deprecated Custom Setting via Terraform"
TerraformFeatureMessage = "Terraform Feature"
// features that are custom settings
AutoWanOptimizeCustomSetting = "autoWanOptimize YF 2"
CustomSettingOverride = "override "
NFSConnMultCustomSetting = "nfsConnMult YW %d"
MinNFSConnMult = 1
MaxNFSConnMult = 23
DefaultNFSConnMult = 4
AnalyticsClusterFilersRaw = "cluster_filers_raw"
CacheModeReadWrite = "read-write"
CacheModeReadOnly = "read"
WriteBackDelayDefault = 30
// user policies for admin.addUser Avere xml rpc call
UserReadOnly = "ro"
UserReadWrite = "rw"
AdminUserName = "admin"
// filer class
FilerClassNetappNonClustered = "NetappNonClustered"
FilerClassNetappClustered = "NetappClustered"
FilerClassEMCIsilon = "EmcIsilon"
FilerClassOther = "Other"
FilerClassAvereCloud = "AvereCloud"
// VServer retry
VServerRetryCount = 60
VServerRetrySleepSeconds = 10
// filer retry
FilerRetryCount = 120
FilerRetrySleepSeconds = 10
// cluster stable, wait 40 minutes for cluster to become healthy
ClusterStableRetryCount = 240
ClusterStableRetrySleepSeconds = 10
// node change, wait 40 minutes for node increase or decrease
NodeChangeRetryCount = 240
NodeChangeRetrySleepSeconds = 10
// only wait 15 minutes for support uploads
UploadGSIRetryCount = 45
UploadGSIRetrySleepSeconds = 20
// statuses returned from Activity
StatusComplete = "complete"
StatusCompleted = "completed"
StatusNodeRemoved = "node(s) removed"
CompletedPercent = "100"
NodeUp = "up"
AlertSeverityGreen = "green" // this means the alert is complete
AlertSeverityYellow = "yellow" // this will eventually resolve itself
// the cloud filer export
CloudFilerExport = "/"
// the share permissions
PermissionsPreserve = "preserve" // this is the default for NFS shares
PermissionsModebits = "modebits" // this is the default for the Azure Storage Share
PrimaryClusterIPKey = "IP"
DefaultExportPolicyName = "default"
DefaultDirectoryServiceName = "default"
FaultString = "faultString"
FaultCode = "faultCode"
MultiCall = "--json system.multicall"
JunctionPolicyPosix = "posix"
JunctionPolicyCifs = "cifs"
CIFSUsernameSourceAD = "AD"
CIFSUsernameSourceFile = "File"
CIFSSelfPasswdUriStrFmt = "https://%s/avere/avere-user.txt"
CIFSSelfGroupUriStrFmt = "https://%s/avere/avere-group.txt"
ProactiveSupportDisabled = "Disabled"
ProactiveSupportSupport = "Support"
ProactiveSupportAPI = "API"
ProactiveSupportFull = "Full"
SupportNamePrefix = "av"
SupportNameSeparator = "0x2d"
SupportNameUnknown = "unknown"
RollingTraceTimeAfter = 2
RollingTraceTimeBefore = 10
DefaultRollingTraceFlag = "0xef401"
)
// terraform schema constants - avoids bugs on schema name changes
const (
controller_address = "controller_address"
controller_admin_username = "controller_admin_username"
controller_admin_password = "controller_admin_password"
controller_ssh_port = "controller_ssh_port"
run_local = "run_local"
use_availability_zones = "use_availability_zones"
allow_non_ascii = "allow_non_ascii"
location = "location"
platform = "platform"
azure_resource_group = "azure_resource_group"
azure_network_resource_group = "azure_network_resource_group"
azure_network_name = "azure_network_name"
azure_subnet_name = "azure_subnet_name"
ntp_servers = "ntp_servers"
timezone = "timezone"
dns_server = "dns_server"
dns_domain = "dns_domain"
dns_search = "dns_search"
proxy_uri = "proxy_uri"
cluster_proxy_uri = "cluster_proxy_uri"
image_id = "image_id"
vfxt_cluster_name = "vfxt_cluster_name"
vfxt_admin_password = "vfxt_admin_password"
vfxt_ssh_key_data = "vfxt_ssh_key_data"
vfxt_node_count = "vfxt_node_count"
node_size = "node_size"
node_cache_size = "node_cache_size"
enable_nlm = "enable_nlm"
vserver_first_ip = "vserver_first_ip"
vserver_ip_count = "vserver_ip_count"
global_custom_settings = "global_custom_settings"
vserver_settings = "vserver_settings"
enable_support_uploads = "enable_support_uploads"
support_uploads_company_name = "support_uploads_company_name"
enable_rolling_trace_data = "enable_rolling_trace_data"
rolling_trace_flag = "rolling_trace_flag" | cifs_netbios_domain_name = "cifs_netbios_domain_name"
cifs_dc_addreses = "cifs_dc_addreses"
cifs_server_name = "cifs_server_name"
cifs_username = "cifs_username"
cifs_password = "cifs_password"
cifs_flatfile_passwd_uri = "cifs_flatfile_passwd_uri"
cifs_flatfile_group_uri = "cifs_flatfile_group_uri"
cifs_flatfile_passwd_b64z = "cifs_flatfile_passwd_b64z"
cifs_flatfile_group_b64z = "cifs_flatfile_group_b64z"
cifs_rid_mapping_base_integer = "cifs_rid_mapping_base_integer"
cifs_organizational_unit = " | active_support_upload = "active_support_upload"
enable_secure_proactive_support = "enable_secure_proactive_support"
cifs_ad_domain = "cifs_ad_domain" | random_line_split |
FRB_MCMC.py | #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def lnprior(x):
|
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR stuff
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model = | beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf | identifier_body |
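Editor's note: the lnprior / lnlike / lnprob trio above is shaped for emcee's EnsembleSampler with ndim = 4 and nwalkers = 24, as named in the script's constants. The wiring below is an editorial sketch of how such a sampler is typically started; the initial ball p0 and the step count are assumptions, not values taken from the script.

```python
# Editorial sketch (assumed usage, not shown in the row above).
import numpy as np
import emcee

ndim, nwalkers = 4, 24
# Hypothetical starting ball around (beta, zeta, Mturn, Rmfp) = (0, 500, 10, 30).
p0 = np.array([0.0, 500.0, 10.0, 30.0]) + 1e-2 * np.random.randn(nwalkers, ndim)

sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[fiducial_DM_z])
sampler.run_mcmc(p0, 1000)                 # number of steps is illustrative
flat_samples = sampler.get_chain(flat=True)
```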
FRB_MCMC.py |
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
return (sign*sigma)
####################################################################
# Cosmology and Astrophysical Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def lnprior(x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR stuff
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of | sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
return (sign*sigma) | conditional_block |
|
FRB_MCMC.py | #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def | (x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR stuff
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model = | lnprior | identifier_name |
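Editor's note: lnlike converts the ionization and density lightcones into a model DM(z) history that is compared against fiducial_DM_z. The sum over lightcone cells approximates the standard cosmological dispersion-measure integral, written here for reference ($\bar n_{e,0}$ is the mean comoving free-electron density and $x_e = 1 - x_{\mathrm{HI}}$ the ionized fraction taken from the lightcone):

$$\mathrm{DM}(z) \;=\; c\int_0^z \frac{\bar n_{e,0}\, x_e(z')\,(1+z')}{H(z')}\, dz', \qquad H(z') \;=\; H_0\sqrt{\Omega_m (1+z')^3 + \Omega_\Lambda},$$

with $H_0$, $\Omega_m$ and $\Omega_\Lambda$ corresponding to the H0, OMm and OMl constants defined at the top of the script.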
FRB_MCMC.py | Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
|
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def lnprior(x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR stuff
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model | #os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
| random_line_split |
marker_detector.py | ), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def contours_color(self):
self.__contours(self.thresh)
return self.cnts
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
|
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if 4 <= approx <= 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
for elem in array_of_contours:
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
#check squres
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if | string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass | identifier_body |
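Editor's note: the detector above classifies contours by approxPolyDP vertex count, then looks for a pair of detected shapes whose area ratio is close to 4.5 (nested squares, read as trains) or 8.5 (nested triangles, read as platforms/depot), taking the colour of the inner shape. A minimal editorial sketch of that pairing logic:

```python
# Editorial sketch of the pairing test used by the detector (illustrative only).
import cv2

def classify(contour):
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
    return {3: "triangle", 4: "square"}.get(len(approx), "unknown")

def is_marker_pair(outer, inner, expected_ratio, tol=1.0):
    a_out, a_in = cv2.contourArea(outer), cv2.contourArea(inner)
    return a_in > 0 and abs(a_out / a_in - expected_ratio) <= tol

# expected_ratio ~ 4.5 for the square (train) markers and ~ 8.5 for the
# triangular (platform / depot) markers, per the ratio checks used above.
```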
marker_detector.py | init__(self):
pass
def draw_station_status(self, image, text):
string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if 4 <= approx <= 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
# iterate over a copy so that removing an element does not skip the following one
for elem in list(array_of_contours):
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
# check squares
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
if color2 is "green" or color2 is "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
if color2 is "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
| for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j] | conditional_block |
|
marker_detector.py | ), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def | (self):
self.__contours(self.thresh)
return self.cnts
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
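# (standard image-moment centroid: cX = M10 / M00, cY = M01 / M00, scaled by self.ratio)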
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
# iterate over a copy so that removing an element does not skip the following one
for elem in list(array_of_contours):
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
# check squares
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
| contours_color | identifier_name |
marker_detector.py | 55), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
# iterate over a copy so that removing an element does not skip the following one
for elem in list(array_of_contours):
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
# check squares
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
if color2 is "green" or color2 is "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
if color2 is "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j]
return None, None
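# Worked example (illustrative numbers, an assumption rather than project data): with
# exp_ratio=4.5 and error=1, an outer contour of area 900 and an inner one of area 220
# give 900 / 220 ~= 4.09, and |4.09 - 4.5| <= 1, so the pair is returned provided their
# centres also pass check_similarity_of_two_cw below.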
def check_similarity_of_two_cw(self, cw_1, cw_2):
err = 50
if abs(cw_1.cX - cw_2.cX) <= err:
if abs(cw_1.cY - cw_2.cY) <= err:
return True
return False
##################################################################
##################################################################
##################################################################
#EXAMPLE OF USAGE BELOW, DELETE WHILE INTEGRATING WITH WHOLE PROJECT
#
# def video():
# cap = cv2.VideoCapture('../shapes/z_pocigami_2.avi')#('../shapes/biale_przejazd_bez_pociagow.avi')#('../shapes/biale_przejazd_z_znacznikami.avi')
# while cap.isOpened():
# ret, frame = cap.read()
#
# if not ret:
# break
#
#
# #example of usage
# shape = ShapeDetector(frame)
# shape.detect_depot()
# shape.detect_trains()
# shape.detect_platforms()
#
# cv2.imshow('frameOUT', shape.IW.output_image)
# cv2.imshow('frameOUT2', shape.IW.edged)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# pass | #
# | random_line_split |
|
application.py |
#StdLibs
import json
from os import path
import csv
###################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
###################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
#POST method: responsible for returning the data in JSON format from the server.
#The server expects a 'data' field containing the name of a file that exists on the server in the static/jsons/ folder.
#If the file is not found it returns a 404.
#If the 'data' field is missing it returns a 400.
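#Hedged usage sketch (client side, not part of this file; host and port are assumptions):
#  requests.post("http://localhost:5000/getGraph", data={"data": "iscrittiAteneo"})
#would return the parsed contents of static/jsons/iscrittiAteneo.json.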
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
#GET method:
#expects a 'graph' field containing one of the names handled below.
#For 'mf' and 'emig' it also expects a second field specifying
#the university or the province.
#In addition, 'iscrittiAtn' and 'mf' MAY (but do not have to) carry
#an extra field that filters the data by a specific year or a specific sex.
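#Hedged usage sketch (the query values are illustrative assumptions, not a list of valid inputs):
#  GET /getGraph?graph=iscrittiAtn&sex=1       -> enrolment line graph filtered by sex
#  GET /getGraph?graph=mf&atn=Padova&year=2019 -> male/female donut for one university and year
#  GET /getGraph?graph=emig&prov=Venezia       -> polar area graph for one province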
if 'graph' in request.args:
# Horizontal bar graph for the provincial hourly wage by level of education
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
# Line graph for yearly university enrolment in Veneto
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
# Donut graph for the male/female distribution across Veneto universities
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
# Polar area graph for students who emigrated to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
#To update the datasets:
#Because of an error made by MIUR when generating the per-university enrolment file, that file
#cannot be downloaded dynamically and has to be replaced manually.
#Likewise, the data obtained from ISTAT cannot be downloaded dynamically through its API, because
#the API's performance is limited (and it does not allow the filters needed to produce the files).
#The provinces dataset is updated automatically every week. The others have to be replaced manually.
#The static datasets go in the /static/notUpdating/ folder.
#The per-university enrolment dataset has to be downloaded from this link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv
#The dataset about students who moved out of the region was built manually from other data and cannot be updated.
#The datasets about the unemployment rate and the average hourly wage are available on this portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases the results must be filtered to the Veneto provinces only.
#The files have to be renamed retribuzioneMedia.csv and taxDisocc.csv
#Fortunately, they are only updated once a year.
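# Minimal helper sketch (an illustrative addition, not part of the original update flow):
# a quick sanity check that can be run after manually replacing iscrittiAteneo.csv and
# before calling /doUpdate. The expected layout (each data row being one ';'-separated
# string of at least five fields) is inferred from the parsing code in updateData() below.
def check_iscritti_csv(path='static/notUpdating/iscrittiAteneo.csv'):
    with open(path, newline='') as f:
        rows = list(csv.reader(f))
    # rows[0] is the header; every data row is a single ';'-joined string in column 0
    return len(rows) > 1 and all(r and len(r[0].split(';')) >= 5 for r in rows[1:])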
@application.route("/doUpdate")
def updateData():
#Per-university enrolment file
#The data is loaded into a dictionary as arrays; the format is documented below
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed CSV
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
# Format: {"universityName" : ["academicYear;maleEnrolled;femaleEnrolled",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
# File of students who enrolled in other regions (emigrated students)
with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #The file name can be changed here if needed, as long as it is well-formed JSON
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count | import requests #Apache 2.0
| random_line_split |