file_name (large_string, 4–140 chars) | prefix (large_string, 0–39k chars) | suffix (large_string, 0–36.1k chars) | middle (large_string, 0–29.4k chars) | fim_type (large_string, 4 classes: random_line_split, conditional_block, identifier_body, identifier_name)
---|---|---|---|---|
leaderboard.go | // Copyright (c) 2015, Sgt. Kabukiman | MIT licensed
package srapi
import (
"net/url"
"strconv"
)
// Leaderboard represents a leaderboard, i.e. a collection of ranked runs for a
// certain configuration of game, category, level and a few others.
type Leaderboard struct {
// a link to the leaderboard on speedrun.com
Weblink string
// whether or not emulators are allowed
Emulators bool
// what platform, if any (otherwise this is empty), is the leaderboard limited to
Platform string
// what region, if any (otherwise this is empty), is the leaderboard limited to
Region string
// whether or not to only take runs with videos into account
VideoOnly bool `json:"video-only"`
// the timing method used to compare runs against each other
Timing TimingMethod
// the chosen variables (keys) and values (values) for the leaderboard, both
// given as their respective IDs
Values map[string]string
// the runs, sorted from best to worst
Runs []RankedRun
// API links to related resources
Links []Link
// do not use this field directly, use the available methods
PlatformsData interface{} `json:"platforms"`
// do not use this field directly, use the available methods
RegionsData interface{} `json:"regions"`
// do not use this field directly, use the available methods
GameData interface{} `json:"game"`
// do not use this field directly, use the available methods
CategoryData interface{} `json:"category"`
// do not use this field directly, use the available methods
LevelData interface{} `json:"level"`
// do not use this field directly, use the available methods
PlayersData interface{} `json:"players"`
// do not use this field directly, use the available methods
VariablesData interface{} `json:"variables"`
}
// RankedRun is a run with an assigned rank. As the rank only makes sense when
// a specific ruleset (video-only? realtime or ingame time? etc.) is applied,
// normal runs do not have a rank; only those in leaderboards have.
type RankedRun struct {
// the embedded run
Run Run
// the rank, starting at 1
Rank int
}
// leaderboardResponse models the actual API response from the server
type leaderboardResponse struct {
// the one leaderboard contained in the response
Data Leaderboard
}
// FullGameLeaderboard retrieves the leaderboard for a specific game and one of
// its full-game categories. An error is returned if no category is given or if
// a per-level category is given. If no game is given, it is fetched automatically,
// but if you have it already at hand, you can save one request by specifying it.
func FullGameLeaderboard(game *Game, cat *Category, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if cat.Type != "per-game" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not a full-game category."}
}
if game == nil {
var err *Error
game, err = cat.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/category/" + cat.ID, options, nil, nil, embeds})
}
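// Usage sketch (not part of the original srapi sources): fetch the top ten
// entries of a full-game category's leaderboard and let the library resolve
// the game from the category. `cat` is assumed to be a *Category obtained
// elsewhere; the "players" embed and the option values are illustrative only.
//
//	opts := &LeaderboardOptions{Top: 10}
//	lb, err := FullGameLeaderboard(nil, cat, opts, "players")
//	if err != nil {
//		// handle the *Error and return
//	}
//	for _, ranked := range lb.Runs {
//		fmt.Println(ranked.Rank, lb.Weblink)
//	}
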
// LevelLeaderboard retrieves the leaderboard for a specific game and one of
// its levels in a specific category. An error is returned if no category or
// level is given or if a full-game category is given. If no game is given, it
// is fetched automatically, but if you have it already at hand, you can save
// one request by specifying it.
func LevelLeaderboard(game *Game, cat *Category, level *Level, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if cat == nil {
return nil, &Error{"", "", ErrorBadLogic, "No category given."}
}
if level == nil {
return nil, &Error{"", "", ErrorBadLogic, "No level given."}
}
if cat.Type != "per-level" {
return nil, &Error{"", "", ErrorBadLogic, "The given category is not an individual-level category."}
}
if game == nil {
var err *Error
game, err = level.Game("")
if err != nil {
return nil, err
}
}
return fetchLeaderboard(request{"GET", "/leaderboards/" + game.ID + "/level/" + level.ID + "/" + cat.ID, options, nil, nil, embeds})
}
// Game returns the game that the leaderboard is for. If it was not embedded, it
// is fetched from the network. Except for broken data on speedrun.com, this
// should never return nil.
func (lb *Leaderboard) Game(embeds string) (*Game, *Error) {
// we only have the game ID at hand
asserted, okay := lb.GameData.(string)
if okay {
return GameByID(asserted, embeds)
}
return toGame(lb.GameData, true), nil
}
// Category returns the category that the leaderboard is for. If it was not
// embedded, it is fetched from the network. Except for broken data on
// speedrun.com, this should never return nil.
func (lb *Leaderboard) Category(embeds string) (*Category, *Error) {
// we only have the category ID at hand
asserted, okay := lb.CategoryData.(string)
if okay {
return CategoryByID(asserted, embeds)
}
return toCategory(lb.CategoryData, true), nil
}
// Level returns the level that the leaderboard is for. If it's a full-game
// leaderboard, nil is returned. If the level was not embedded, it is fetched
// from the network.
func (lb *Leaderboard) Level(embeds string) (*Level, *Error) {
if lb.LevelData == nil {
return nil, nil
}
// we only have the level ID at hand
asserted, okay := lb.LevelData.(string)
if okay {
return LevelByID(asserted, embeds)
}
return toLevel(lb.LevelData, true), nil
}
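// Illustrative note (assumption about typical use): when the leaderboard was
// requested with embeds such as "game,category", the accessors above convert
// the embedded objects directly and no further request is made; without
// embeds only an ID is present and a *ByID lookup hits the network.
//
//	lb, _ := FullGameLeaderboard(nil, cat, nil, "game,category")
//	g, _ := lb.Game("")      // embedded: converted locally
//	c2, _ := lb.Category("") // likewise, no extra request
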
// Platforms returns a list of all platforms that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Platforms() *PlatformCollection {
return toPlatformCollection(lb.PlatformsData)
}
// Regions returns a list of all regions that are used in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Regions() *RegionCollection {
return toRegionCollection(lb.RegionsData)
}
// Variables returns a list of all variables that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Variables() *VariableCollection {
return toVariableCollection(lb.VariablesData)
}
// Players returns a list of all players that are present in the leaderboard.
// If they have not been embedded, an empty collection is returned.
func (lb *Leaderboard) Players() *PlayerCollection {
return toPlayerCollection(lb.PlayersData)
}
// for the 'hasLinks' interface
func (lb *Leaderboard) links() []Link {
return lb.Links
}
// LeaderboardOptions are the options that can be used to further narrow down a
// leaderboard to only a subset of runs.
type LeaderboardOptions struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. Values of zero or less are ignored.
Top int
// The platform ID to restrict the leaderboard to.
Platform string
// The region ID to restrict the leaderboard to.
Region string
// When set, can control if all or no runs are done on emulators.
Emulators OptionalFlag
// When set, can control if all or no runs are required to have a video.
VideoOnly OptionalFlag
// the timing method that should be used to compare runs; not all are
// allowed for all games, a server-side error will be returned if an invalid
// choice was made.
Timing TimingMethod
// ISO 8601 date; when given, only runs done before this date will be considered
Date string
// map of variable IDs to value IDs
Values map[string]string
}
// applyToURL merges the options into a URL.
func (lo *LeaderboardOptions) applyToURL(u *url.URL) {
if lo == nil {
return
}
values := u.Query()
if lo.Top > 0 {
values.Set("top", strconv.Itoa(lo.Top))
}
if len(lo.Platform) > 0 {
values.Set("platform", lo.Platform)
}
if len(lo.Region) > 0 {
values.Set("region", lo.Region)
}
if len(lo.Timing) > 0 {
values.Set("timing", string(lo.Timing))
}
if len(lo.Date) > 0 {
values.Set("date", lo.Date)
}
lo.Emulators.applyToQuery("emulators", &values)
lo.VideoOnly.applyToQuery("video-only", &values)
for varID, valueID := range lo.Values {
values.Set("var-"+varID, valueID)
}
u.RawQuery = values.Encode()
}
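// For illustration (option values are assumptions, not from the original
// source): with Top=5, Region="pal" and Values set to {"var1": "valA"},
// applyToURL extends a request URL roughly like
//
//	/leaderboards/<game>/category/<cat>?region=pal&top=5&var-var1=valA
//
// because every non-zero option above is written into the query string.
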
// LeaderboardFilter represents the possible filtering options when fetching a
// list of leaderboards.
type LeaderboardFilter struct {
// If set to a value >0, only this many places are returned. Note that there
// can be multiple runs with the same rank, so you can end up with
// len(runs) > Top. Values of zero or less are ignored.
Top int
// If set, can be used to skip returning empty leaderboards.
SkipEmpty OptionalFlag
}
// applyToURL merges the filter into a URL.
func (lf *LeaderboardFilter) applyToURL(u *url.URL) {
if lf == nil {
return
}
values := u.Query()
if lf.Top > 0 {
values.Set("top", strconv.Itoa(lf.Top))
}
lf.SkipEmpty.applyToQuery("skip-empty", &values)
u.RawQuery = values.Encode()
}
// fetchLeaderboard fetches a single leaderboard from the network. If the request
// fails, the returned leaderboard is nil; otherwise the returned error is nil.
func fetchLeaderboard(request request) (*Leaderboard, *Error) {
result := &leaderboardResponse{}
err := httpClient.do(request, result)
if err != nil {
return nil, err
}
return &result.Data, nil
}
// fetchLeaderboardLink tries to fetch a given link and interpret the response as
// a single leaderboard. If the link is nil or the leaderboard could not be fetched,
// nil is returned.
func fetchLeaderboardLink(link requestable, options *LeaderboardOptions, embeds string) (*Leaderboard, *Error) {
if !link.exists() {
return nil, nil
}
return fetchLeaderboard(link.request(options, nil, embeds))
}
// fetchLeaderboards fetches a list of leaderboards from the network. It always
// returns a collection, even when an error is returned.
func fetchLeaderboards(request request) (*LeaderboardCollection, *Error) {
result := &LeaderboardCollection{}
err := httpClient.do(request, result)
return result, err
}
// fetchLeaderboardsLink tries to fetch a given link and interpret the response as
// a list of leaderboards. It always returns a collection, even when an error is
// returned or the given link is nil.
func fetchLeaderboardsLink(link requestable, filter filter, sort *Sorting, embeds string) (*LeaderboardCollection, *Error) {
if !link.exists() {
return &LeaderboardCollection{}, nil
}
return fetchLeaderboards(link.request(filter, sort, embeds))
}
cyclone.go | /*-
* Copyright © 2016-2017, Jörg Pernfuß <[email protected]>
* Copyright © 2016, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package cyclone // import "github.com/mjolnir42/cyclone/lib/cyclone"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/go-redis/redis"
"github.com/mjolnir42/cyclone/lib/cyclone/cpu"
"github.com/mjolnir42/cyclone/lib/cyclone/disk"
"github.com/mjolnir42/cyclone/lib/cyclone/mem"
"github.com/mjolnir42/erebos"
"github.com/mjolnir42/legacy"
metrics "github.com/rcrowley/go-metrics"
)
// Handlers is the registry of running application handlers
var Handlers map[int]erebos.Handler
// AgeCutOff is the duration after which back-processed alarms are
// ignored and no alert is dispatched
var AgeCutOff time.Duration
func init() {
Handlers = make(map[int]erebos.Handler)
}
// Cyclone performs threshold evaluation alarming on metrics
type Cyclone struct {
Num int
Input chan *erebos.Transport
Shutdown chan struct{}
Death chan error
Config *erebos.Config
Metrics *metrics.Registry
CPUData map[int64]cpu.CPU
MemData map[int64]mem.Mem
CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
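// Serialized with the json tags above and wrapped in a slice (see the POST
// further down), a notification therefore looks roughly like this; all values
// are made up for illustration:
//
//	[{"source": "host.example.com / agent", "event_id": "abc123",
//	  "version": "1", "sourcehost": "host.example.com",
//	  "on_call": "No oncall information available",
//	  "targethost": "host.example.com", "message": "Ok.", "level": 0,
//	  "timestamp": "2017-01-01T00:00:00Z",
//	  "check": "cyclone(cpu.usage.percent)",
//	  "monitoring": "monitoring.example.com", "team": "ops"}]
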
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
// guard against a nil transport before dereferencing it in the log call
if msg == nil {
logrus.Warnf("Ignoring nil message")
return nil
}
if msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
go c.commit(msg)
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
Team: thr[key].MetaTeam,
}
al.Level, _ = strconv.ParseInt(alarmLevel, 10, 64)
if alarmLevel == `0` {
al.Message = `Ok.`
} else {
al.Message = fmt.Sprintf(
"Metric %s has broken threshold. Value %s %s %d",
m.Path,
fVal,
thr[key].Predicate,
brokenThr,
)
}
if al.Oncall == `` {
al.Oncall = `No oncall information available`
}
c.updateEval(thr[key].ID)
if c.Config.Cyclone.TestMode {
// do not send out alarms in testmode
continue thrloop
}
alrms := metrics.GetOrRegisterMeter(`/alarms.per.second`,
*c.Metrics)
alrms.Mark(1)
go func(a AlarmEvent) {
b := new(bytes.Buffer)
aSlice := []AlarmEvent{a}
if err := json.NewEncoder(b).Encode(aSlice); err != nil {
logrus.Errorf("Cyclone[%d], ERROR json encoding alarm for %s: %s", c.Num, a.EventID, err)
return
}
resp, err := http.Post(
c.Config.Cyclone.DestinationURI,
`application/json; charset=utf-8`,
b,
)
if err != nil {
logrus.Errorf("Cyclone[%d], ERROR sending alarm for %s: %s", c.Num, a.EventID, err)
return
}
logrus.Infof("Cyclone[%d], Dispatched alarm for %s at level %d, returncode was %d",
c.Num, a.EventID, a.Level, resp.StatusCode)
if resp.StatusCode >= 209 {
// read response body
bt, _ := ioutil.ReadAll(resp.Body)
logrus.Errorf("Cyclone[%d], ResponseMsg(%d): %s", c.Num, resp.StatusCode, string(bt))
resp.Body.Close()
// reset buffer and encode JSON again so it can be
// logged
b.Reset()
json.NewEncoder(b).Encode(aSlice)
logrus.Errorf("Cyclone[%d], RequestJSON: %s", c.Num, b.String())
return
}
// ensure http.Response.Body is consumed and closed,
// otherwise it leaks filehandles
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}(al)
}
if evaluations == 0 {
logrus.Debugf("Cyclone[%d], metric %s(%d) matched no configurations", c.Num, m.Path, m.AssetID)
}
return nil
}
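// Aggregation sketch for the switch in process above (behavioural assumption
// about the cpu/mem/disk helper packages): raw splits for one asset feed a
// per-asset accumulator, and Calculate() either returns a derived metric such
// as cpu.usage.percent or nil while samples are still missing.
//
//	cu := cpu.CPU{}
//	cu.Update(split)          // split: one /sys/cpu/count/* metric
//	derived := cu.Calculate() // assumed nil until enough counters were seen
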
// commit marks a message as fully processed
func (c *Cyclone) commit(msg *erebos.Transport) {
msg.Commit <- &erebos.Commit{
Topic: msg.Topic,
Partition: msg.Partition,
Offset: msg.Offset,
}
}
// cmpInt compares an integer value against a threshold
func (c *Cyclone) cmpInt(pred string, value, threshold int64) (bool, string) {
fVal := fmt.Sprintf("%d", value)
switch pred {
case `<`:
return value < threshold, fVal
case `<=`:
return value <= threshold, fVal
case `==`:
return value == threshold, fVal
case `>=`:
return value >= threshold, fVal
case `>`:
return value > threshold, fVal
case `!=`:
return value != threshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
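// Worked example (illustrative): cmpInt(">=", 95, 90) returns (true, "95"), so
// in the level loop of process a metric value of 95 checked against a level-5
// threshold of 90 with predicate ">=" breaks that threshold and dispatches a
// level-5 alarm.
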
// cmpFlp compares a floating point value against a threshold
func (c *Cyclone) cmpFlp(pred string, value float64, threshold int64) (bool, string) {
fthreshold := float64(threshold)
fVal := fmt.Sprintf("%.3f", value)
switch pred {
case `<`:
return value < fthreshold, fVal
case `<=`:
return value <= fthreshold, fVal
case `==`:
return value == fthreshold, fVal
case `>=`:
return value >= fthreshold, fVal
case `>`:
return value > fthreshold, fVal
case `!=`:
return value != fthreshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| ) { | identifier_name |
cyclone.go | /*-
* Copyright © 2016-2017, Jörg Pernfuß <[email protected]>
* Copyright © 2016, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package cyclone // import "github.com/mjolnir42/cyclone/lib/cyclone"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/go-redis/redis"
"github.com/mjolnir42/cyclone/lib/cyclone/cpu"
"github.com/mjolnir42/cyclone/lib/cyclone/disk"
"github.com/mjolnir42/cyclone/lib/cyclone/mem"
"github.com/mjolnir42/erebos"
"github.com/mjolnir42/legacy"
metrics "github.com/rcrowley/go-metrics"
)
// Handlers is the registry of running application handlers
var Handlers map[int]erebos.Handler
// AgeCutOff is the duration after which back-processed alarms are
// ignored and not alerted
var AgeCutOff time.Duration
func init() {
Handlers = make(map[int]erebos.Handler)
}
// Cyclone performs threshold evaluation alarming on metrics
type Cyclone struct {
Num int
Input chan *erebos.Transport
Shutdown chan struct{}
Death chan error
Config *erebos.Config
Metrics *metrics.Registry
CPUData map[int64]cpu.CPU
MemData map[int64]mem.Mem
CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
if msg != nil {
go c.commit(msg)
}
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
| m.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
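// Worked example (illustrative, not from the original source): with
// Predicate `>=` and Thresholds {"3": 90, "5": 95}, the scan above starts
// at level 9 and stops at the first breached level, so the most severe
// configured level wins. A value of 97 breaks level 5 (97 >= 95), a value
// of 92 only breaks level 3 (92 >= 90), and a value of 80 breaks nothing,
// leaving alarmLevel at "0" so the event goes out as "Ok.".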
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
Team: thr[key].MetaTeam,
}
al.Level, _ = strconv.ParseInt(alarmLevel, 10, 64)
if alarmLevel == `0` {
al.Message = `Ok.`
} else {
al.Message = fmt.Sprintf(
"Metric %s has broken threshold. Value %s %s %d",
m.Path,
fVal,
thr[key].Predicate,
brokenThr,
)
}
if al.Oncall == `` {
al.Oncall = `No oncall information available`
}
c.updateEval(thr[key].ID)
if c.Config.Cyclone.TestMode {
// do not send out alarms in testmode
continue thrloop
}
alrms := metrics.GetOrRegisterMeter(`/alarms.per.second`,
*c.Metrics)
alrms.Mark(1)
go func(a AlarmEvent) {
b := new(bytes.Buffer)
aSlice := []AlarmEvent{a}
if err := json.NewEncoder(b).Encode(aSlice); err != nil {
logrus.Errorf("Cyclone[%d], ERROR json encoding alarm for %s: %s", c.Num, a.EventID, err)
return
}
resp, err := http.Post(
c.Config.Cyclone.DestinationURI,
`application/json; charset=utf-8`,
b,
)
if err != nil {
logrus.Errorf("Cyclone[%d], ERROR sending alarm for %s: %s", c.Num, a.EventID, err)
return
}
logrus.Infof("Cyclone[%d], Dispatched alarm for %s at level %d, returncode was %d",
c.Num, a.EventID, a.Level, resp.StatusCode)
if resp.StatusCode >= 209 {
// read response body
bt, _ := ioutil.ReadAll(resp.Body)
logrus.Errorf("Cyclone[%d], ResponseMsg(%d): %s", c.Num, resp.StatusCode, string(bt))
resp.Body.Close()
// reset buffer and encode JSON again so it can be
// logged
b.Reset()
json.NewEncoder(b).Encode(aSlice)
logrus.Errorf("Cyclone[%d], RequestJSON: %s", c.Num, b.String())
return
}
// ensure http.Response.Body is consumed and closed,
// otherwise it leaks filehandles
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}(al)
}
if evaluations == 0 {
logrus.Debugf("Cyclone[%d], metric %s(%d) matched no configurations", c.Num, m.Path, m.AssetID)
}
return nil
}
// commit marks a message as fully processed
func (c *Cyclone) commit(msg *erebos.Transport) {
msg.Commit <- &erebos.Commit{
Topic: msg.Topic,
Partition: msg.Partition,
Offset: msg.Offset,
}
}
// cmpInt compares an integer value against a threshold
func (c *Cyclone) cmpInt(pred string, value, threshold int64) (bool, string) {
fVal := fmt.Sprintf("%d", value)
switch pred {
case `<`:
return value < threshold, fVal
case `<=`:
return value <= threshold, fVal
case `==`:
return value == threshold, fVal
case `>=`:
return value >= threshold, fVal
case `>`:
return value > threshold, fVal
case `!=`:
return value != threshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// cmpFlp compares a floating point value against a threshold
func (c *Cyclone) cmpFlp(pred string, value float64, threshold int64) (bool, string) {
fthreshold := float64(threshold)
fVal := fmt.Sprintf("%.3f", value)
switch pred {
case `<`:
return value < fthreshold, fVal
case `<=`:
return value <= fthreshold, fVal
case `==`:
return value == fthreshold, fVal
case `>=`:
return value >= fthreshold, fVal
case `>`:
return value > fthreshold, fVal
case `!=`:
return value != fthreshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
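// Example (illustrative): cmpFlp(">=", 97.456, 95) returns (true, "97.456");
// the int64 threshold is widened to float64 before comparing, and the
// formatted value string is what later appears in the alarm message.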
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| mm = c.MemData[id]
}
m | conditional_block |
cyclone.go | /*-
* Copyright © 2016-2017, Jörg Pernfuß <[email protected]>
* Copyright © 2016, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package cyclone // import "github.com/mjolnir42/cyclone/lib/cyclone"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/go-redis/redis"
"github.com/mjolnir42/cyclone/lib/cyclone/cpu"
"github.com/mjolnir42/cyclone/lib/cyclone/disk"
"github.com/mjolnir42/cyclone/lib/cyclone/mem"
"github.com/mjolnir42/erebos"
"github.com/mjolnir42/legacy"
metrics "github.com/rcrowley/go-metrics"
)
// Handlers is the registry of running application handlers
var Handlers map[int]erebos.Handler
// AgeCutOff is the duration after which back-processed alarms are
// ignored and not alerted
var AgeCutOff time.Duration
func init() {
Handlers = make(map[int]erebos.Handler)
}
// Cyclone performs threshold evaluation alarming on metrics
type Cyclone struct {
Num int
Input chan *erebos.Transport
Shutdown chan struct{}
Death chan error
Config *erebos.Config
Metrics *metrics.Registry
CPUData map[int64]cpu.CPU
MemData map[int64]mem.Mem
CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
r | process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
if msg == nil {
// guard first: a nil transport must not be dereferenced for logging
logrus.Warnln("Ignoring nil message")
return nil
}
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
go c.commit(msg)
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
Team: thr[key].MetaTeam,
}
al.Level, _ = strconv.ParseInt(alarmLevel, 10, 64)
if alarmLevel == `0` {
al.Message = `Ok.`
} else {
al.Message = fmt.Sprintf(
"Metric %s has broken threshold. Value %s %s %d",
m.Path,
fVal,
thr[key].Predicate,
brokenThr,
)
}
if al.Oncall == `` {
al.Oncall = `No oncall information available`
}
c.updateEval(thr[key].ID)
if c.Config.Cyclone.TestMode {
// do not send out alarms in testmode
continue thrloop
}
alrms := metrics.GetOrRegisterMeter(`/alarms.per.second`,
*c.Metrics)
alrms.Mark(1)
go func(a AlarmEvent) {
b := new(bytes.Buffer)
aSlice := []AlarmEvent{a}
if err := json.NewEncoder(b).Encode(aSlice); err != nil {
logrus.Errorf("Cyclone[%d], ERROR json encoding alarm for %s: %s", c.Num, a.EventID, err)
return
}
resp, err := http.Post(
c.Config.Cyclone.DestinationURI,
`application/json; charset=utf-8`,
b,
)
if err != nil {
logrus.Errorf("Cyclone[%d], ERROR sending alarm for %s: %s", c.Num, a.EventID, err)
return
}
logrus.Infof("Cyclone[%d], Dispatched alarm for %s at level %d, returncode was %d",
c.Num, a.EventID, a.Level, resp.StatusCode)
if resp.StatusCode >= 209 {
// read response body
bt, _ := ioutil.ReadAll(resp.Body)
logrus.Errorf("Cyclone[%d], ResponseMsg(%d): %s", c.Num, resp.StatusCode, string(bt))
resp.Body.Close()
// reset buffer and encode JSON again so it can be
// logged
b.Reset()
json.NewEncoder(b).Encode(aSlice)
logrus.Errorf("Cyclone[%d], RequestJSON: %s", c.Num, b.String())
return
}
// ensure http.Response.Body is consumed and closed,
// otherwise it leaks filehandles
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}(al)
}
if evaluations == 0 {
logrus.Debugf("Cyclone[%d], metric %s(%d) matched no configurations", c.Num, m.Path, m.AssetID)
}
return nil
}
// commit marks a message as fully processed
func (c *Cyclone) commit(msg *erebos.Transport) {
msg.Commit <- &erebos.Commit{
Topic: msg.Topic,
Partition: msg.Partition,
Offset: msg.Offset,
}
}
// cmpInt compares an integer value against a threshold
func (c *Cyclone) cmpInt(pred string, value, threshold int64) (bool, string) {
fVal := fmt.Sprintf("%d", value)
switch pred {
case `<`:
return value < threshold, fVal
case `<=`:
return value <= threshold, fVal
case `==`:
return value == threshold, fVal
case `>=`:
return value >= threshold, fVal
case `>`:
return value > threshold, fVal
case `!=`:
return value != threshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// cmpFlp compares a floating point value against a threshold
func (c *Cyclone) cmpFlp(pred string, value float64, threshold int64) (bool, string) {
fthreshold := float64(threshold)
fVal := fmt.Sprintf("%.3f", value)
switch pred {
case `<`:
return value < fthreshold, fVal
case `<=`:
return value <= fthreshold, fVal
case `==`:
return value == fthreshold, fVal
case `>=`:
return value >= fthreshold, fVal
case `>`:
return value > fthreshold, fVal
case `!=`:
return value != fthreshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
| unloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// | identifier_body |
cyclone.go | /*-
* Copyright © 2016-2017, Jörg Pernfuß <[email protected]>
* Copyright © 2016, 1&1 Internet SE
* All rights reserved.
*
* Use of this source code is governed by a 2-clause BSD license
* that can be found in the LICENSE file.
*/
package cyclone // import "github.com/mjolnir42/cyclone/lib/cyclone"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/go-redis/redis"
"github.com/mjolnir42/cyclone/lib/cyclone/cpu"
"github.com/mjolnir42/cyclone/lib/cyclone/disk"
"github.com/mjolnir42/cyclone/lib/cyclone/mem"
"github.com/mjolnir42/erebos"
"github.com/mjolnir42/legacy"
metrics "github.com/rcrowley/go-metrics"
)
// Handlers is the registry of running application handlers
var Handlers map[int]erebos.Handler
// AgeCutOff is the duration after which back-processed alarms are
// ignored and not alerted
var AgeCutOff time.Duration
func init() {
Handlers = make(map[int]erebos.Handler)
}
// Cyclone performs threshold evaluation alarming on metrics
type Cyclone struct {
Num int
Input chan *erebos.Transport
Shutdown chan struct{}
Death chan error
Config *erebos.Config
Metrics *metrics.Registry
CPUData map[int64]cpu.CPU
MemData map[int64]mem.Mem | CTXData map[int64]cpu.CTX
DskData map[int64]map[string]disk.Disk
redis *redis.Client
internalInput chan *legacy.MetricSplit
}
// AlarmEvent is the datatype for sending out alarm notifications
type AlarmEvent struct {
Source string `json:"source"`
EventID string `json:"event_id"`
Version string `json:"version"`
Sourcehost string `json:"sourcehost"`
Oncall string `json:"on_call"`
Targethost string `json:"targethost"`
Message string `json:"message"`
Level int64 `json:"level"`
Timestamp string `json:"timestamp"`
Check string `json:"check"`
Monitoring string `json:"monitoring"`
Team string `json:"team"`
}
// run is the event loop for Cyclone
func (c *Cyclone) run() {
runloop:
for {
select {
case <-c.Shutdown:
// received shutdown, drain input channel which will be
// closed by main
goto drainloop
case msg := <-c.Input:
if msg == nil {
// this can happen if we read the closed Input channel
// before the closed Shutdown channel
continue runloop
}
if err := c.process(msg); err != nil {
c.Death <- err
<-c.Shutdown
break runloop
}
}
}
drainloop:
for {
select {
case msg := <-c.Input:
if msg == nil {
// channel is closed
break drainloop
}
c.process(msg)
}
}
}
// process evaluates a metric and raises alarms as required
func (c *Cyclone) process(msg *erebos.Transport) error {
if msg == nil || msg.Value == nil {
if msg == nil {
// guard first: a nil transport must not be dereferenced for logging
logrus.Warnln("Ignoring nil message")
return nil
}
logrus.Warnf("Ignoring empty message from: %d", msg.HostID)
go c.commit(msg)
return nil
}
m := &legacy.MetricSplit{}
if err := json.Unmarshal(msg.Value, m); err != nil {
return err
}
switch m.Path {
case `_internal.cyclone.heartbeat`:
c.heartbeat()
return nil
}
// non-heartbeat metrics count towards processed metrics
metrics.GetOrRegisterMeter(`/metrics/processed.per.second`,
*c.Metrics).Mark(1)
switch m.Path {
case `/sys/cpu/ctx`:
ctx := cpu.CTX{}
id := m.AssetID
if _, ok := c.CTXData[id]; ok {
ctx = c.CTXData[id]
}
m = ctx.Update(m)
c.CTXData[id] = ctx
case `/sys/cpu/count/idle`:
fallthrough
case `/sys/cpu/count/iowait`:
fallthrough
case `/sys/cpu/count/irq`:
fallthrough
case `/sys/cpu/count/nice`:
fallthrough
case `/sys/cpu/count/softirq`:
fallthrough
case `/sys/cpu/count/system`:
fallthrough
case `/sys/cpu/count/user`:
cu := cpu.CPU{}
id := m.AssetID
if _, ok := c.CPUData[id]; ok {
cu = c.CPUData[id]
}
cu.Update(m)
m = cu.Calculate()
c.CPUData[id] = cu
case `/sys/memory/active`:
fallthrough
case `/sys/memory/buffers`:
fallthrough
case `/sys/memory/cached`:
fallthrough
case `/sys/memory/free`:
fallthrough
case `/sys/memory/inactive`:
fallthrough
case `/sys/memory/swapfree`:
fallthrough
case `/sys/memory/swaptotal`:
fallthrough
case `/sys/memory/total`:
mm := mem.Mem{}
id := m.AssetID
if _, ok := c.MemData[id]; ok {
mm = c.MemData[id]
}
mm.Update(m)
m = mm.Calculate()
c.MemData[id] = mm
case `/sys/disk/blk_total`:
fallthrough
case `/sys/disk/blk_used`:
fallthrough
case `/sys/disk/blk_read`:
fallthrough
case `/sys/disk/blk_wrtn`:
if len(m.Tags) == 0 {
m = nil
break
}
d := disk.Disk{}
id := m.AssetID
mpt := m.Tags[0]
if c.DskData[id] == nil {
c.DskData[id] = make(map[string]disk.Disk)
}
if _, ok := c.DskData[id][mpt]; !ok {
c.DskData[id][mpt] = d
}
if _, ok := c.DskData[id][mpt]; ok {
d = c.DskData[id][mpt]
}
d.Update(m)
mArr := d.Calculate()
if mArr != nil {
for _, mPtr := range mArr {
// no deadlock, channel is buffered
c.internalInput <- mPtr
}
}
c.DskData[id][mpt] = d
m = nil
}
if m == nil {
logrus.Debugf("Cyclone[%d], Metric has been consumed", c.Num)
return nil
}
lid := m.LookupID()
thr := c.Lookup(lid)
if thr == nil {
logrus.Errorf("Cyclone[%d], ERROR fetching threshold data. Lookup service available?", c.Num)
return nil
}
if len(thr) == 0 {
logrus.Debugf("Cyclone[%d], No thresholds configured for %s from %d", c.Num, m.Path, m.AssetID)
return nil
}
logrus.Debugf("Cyclone[%d], Forwarding %s from %d for evaluation (%s)", c.Num, m.Path, m.AssetID, lid)
evals := metrics.GetOrRegisterMeter(`/evaluations.per.second`,
*c.Metrics)
evals.Mark(1)
internalMetric := false
switch m.Path {
case
// internal metrics generated by cyclone
`cpu.ctx.per.second`,
`cpu.usage.percent`,
`memory.usage.percent`:
internalMetric = true
case
// internal metrics sent by main daemon
`/sys/cpu/blocked`,
`/sys/cpu/uptime`,
`/sys/load/300s`,
`/sys/load/60s`,
`/sys/load/900s`,
`/sys/load/running_proc`,
`/sys/load/total_proc`:
internalMetric = true
default:
switch {
case
strings.HasPrefix(m.Path, `disk.free:`),
strings.HasPrefix(m.Path, `disk.read.per.second:`),
strings.HasPrefix(m.Path, `disk.usage.percent:`),
strings.HasPrefix(m.Path, `disk.write.per.second:`):
internalMetric = true
}
}
evaluations := 0
thrloop:
for key := range thr {
var alarmLevel = "0"
var brokenThr int64
dispatchAlarm := false
broken := false
fVal := ``
if internalMetric {
dispatchAlarm = true
}
if len(m.Tags) > 0 && m.Tags[0] == thr[key].ID {
dispatchAlarm = true
}
if !dispatchAlarm {
continue thrloop
}
logrus.Debugf("Cyclone[%d], Evaluating metric %s from %d against config %s",
c.Num, m.Path, m.AssetID, thr[key].ID)
evaluations++
lvlloop:
for _, lvl := range []string{`9`, `8`, `7`, `6`, `5`, `4`, `3`, `2`, `1`, `0`} {
thrval, ok := thr[key].Thresholds[lvl]
if !ok {
continue
}
logrus.Debugf("Cyclone[%d], Checking %s alarmlevel %s", c.Num, thr[key].ID, lvl)
switch m.Type {
case `integer`:
fallthrough
case `long`:
broken, fVal = c.cmpInt(thr[key].Predicate,
m.Value().(int64),
thrval)
case `real`:
broken, fVal = c.cmpFlp(thr[key].Predicate,
m.Value().(float64),
thrval)
}
if broken {
alarmLevel = lvl
brokenThr = thrval
break lvlloop
}
}
al := AlarmEvent{
Source: fmt.Sprintf("%s / %s", thr[key].MetaTargethost, thr[key].MetaSource),
EventID: thr[key].ID,
Version: c.Config.Cyclone.APIVersion,
Sourcehost: thr[key].MetaTargethost,
Oncall: thr[key].Oncall,
Targethost: thr[key].MetaTargethost,
Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
Check: fmt.Sprintf("cyclone(%s)", m.Path),
Monitoring: thr[key].MetaMonitoring,
Team: thr[key].MetaTeam,
}
al.Level, _ = strconv.ParseInt(alarmLevel, 10, 64)
if alarmLevel == `0` {
al.Message = `Ok.`
} else {
al.Message = fmt.Sprintf(
"Metric %s has broken threshold. Value %s %s %d",
m.Path,
fVal,
thr[key].Predicate,
brokenThr,
)
}
if al.Oncall == `` {
al.Oncall = `No oncall information available`
}
c.updateEval(thr[key].ID)
if c.Config.Cyclone.TestMode {
// do not send out alarms in testmode
continue thrloop
}
alrms := metrics.GetOrRegisterMeter(`/alarms.per.second`,
*c.Metrics)
alrms.Mark(1)
go func(a AlarmEvent) {
b := new(bytes.Buffer)
aSlice := []AlarmEvent{a}
if err := json.NewEncoder(b).Encode(aSlice); err != nil {
logrus.Errorf("Cyclone[%d], ERROR json encoding alarm for %s: %s", c.Num, a.EventID, err)
return
}
resp, err := http.Post(
c.Config.Cyclone.DestinationURI,
`application/json; charset=utf-8`,
b,
)
if err != nil {
logrus.Errorf("Cyclone[%d], ERROR sending alarm for %s: %s", c.Num, a.EventID, err)
return
}
logrus.Infof("Cyclone[%d], Dispatched alarm for %s at level %d, returncode was %d",
c.Num, a.EventID, a.Level, resp.StatusCode)
if resp.StatusCode >= 209 {
// read response body
bt, _ := ioutil.ReadAll(resp.Body)
logrus.Errorf("Cyclone[%d], ResponseMsg(%d): %s", c.Num, resp.StatusCode, string(bt))
resp.Body.Close()
// reset buffer and encode JSON again so it can be
// logged
b.Reset()
json.NewEncoder(b).Encode(aSlice)
logrus.Errorf("Cyclone[%d], RequestJSON: %s", c.Num, b.String())
return
}
// ensure http.Response.Body is consumed and closed,
// otherwise it leaks filehandles
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}(al)
}
if evaluations == 0 {
logrus.Debugf("Cyclone[%d], metric %s(%d) matched no configurations", c.Num, m.Path, m.AssetID)
}
return nil
}
// commit marks a message as fully processed
func (c *Cyclone) commit(msg *erebos.Transport) {
msg.Commit <- &erebos.Commit{
Topic: msg.Topic,
Partition: msg.Partition,
Offset: msg.Offset,
}
}
// cmpInt compares an integer value against a threshold
func (c *Cyclone) cmpInt(pred string, value, threshold int64) (bool, string) {
fVal := fmt.Sprintf("%d", value)
switch pred {
case `<`:
return value < threshold, fVal
case `<=`:
return value <= threshold, fVal
case `==`:
return value == threshold, fVal
case `>=`:
return value >= threshold, fVal
case `>`:
return value > threshold, fVal
case `!=`:
return value != threshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// cmpFlp compares a floating point value against a threshold
func (c *Cyclone) cmpFlp(pred string, value float64, threshold int64) (bool, string) {
fthreshold := float64(threshold)
fVal := fmt.Sprintf("%.3f", value)
switch pred {
case `<`:
return value < fthreshold, fVal
case `<=`:
return value <= fthreshold, fVal
case `==`:
return value == fthreshold, fVal
case `>=`:
return value >= fthreshold, fVal
case `>`:
return value > fthreshold, fVal
case `!=`:
return value != fthreshold, fVal
default:
logrus.Errorf("Cyclone[%d], ERROR unknown predicate: %s", c.Num, pred)
return false, ``
}
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix | random_line_split |
|
server.rs | use std::io::IoResult;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use serialize::base64::{ToBase64, STANDARD};
use std::ascii::AsciiExt;
use time;
use std::io::{Listener, Acceptor};
use std::io::net::tcp::TcpListener;
use std::io::net::tcp::TcpStream;
use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want custom scheduling, client tracking, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String |
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w.headers.server = Some(String::from_str("rust-ws/0.1-pre"));
for header in r.headers.iter() {
debug!("Header {}: {}", header.header_name(), header.header_value());
}
// NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names
match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) {
Some(val) => {
let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice());
w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept));
},
None => panic!()
}
return true; // successful_handshake
},
(_, _) => self.handle_request(r, w)
}
return false;
}
}
| {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
} | identifier_body |
server.rs | use std::io::IoResult;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use serialize::base64::{ToBase64, STANDARD};
use std::ascii::AsciiExt;
use time;
use std::io::{Listener, Acceptor};
use std::io::net::tcp::TcpListener;
use std::io::net::tcp::TcpStream; | use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want custom scheduling, client tracking, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
}
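// Worked example (from RFC 6455 Section 1.3, not part of the original file):
// for the sample client key "dGhlIHNhbXBsZSBub25jZQ==" this function yields
// "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", e.g. (with `server` being any value that
// implements WebSocketServer):
//
// assert_eq!(server.sec_websocket_accept("dGhlIHNhbXBsZSBub25jZQ==").as_slice(),
//            "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=");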
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w.headers.server = Some(String::from_str("rust-ws/0.1-pre"));
for header in r.headers.iter() {
debug!("Header {}: {}", header.header_name(), header.header_value());
}
// NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names
match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) {
Some(val) => {
let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice());
w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept));
},
None => panic!()
}
return true; // successful_handshake
},
(_, _) => self.handle_request(r, w)
}
return false;
}
} | use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
| random_line_split |
server.rs | use std::io::IoResult;
use crypto::sha1::Sha1;
use crypto::digest::Digest;
use serialize::base64::{ToBase64, STANDARD};
use std::ascii::AsciiExt;
use time;
use std::io::{Listener, Acceptor};
use std::io::net::tcp::TcpListener;
use std::io::net::tcp::TcpStream;
use http::buffer::BufferedStream;
use std::thread::Thread;
use std::sync::mpsc::{channel, Sender, Receiver};
use http::server::{Server, Request, ResponseWriter};
use http::status::SwitchingProtocols;
use http::headers::HeaderEnum;
use http::headers::response::Header::ExtensionHeader;
use http::headers::connection::Connection::Token;
use http::method::Method::Get;
pub use message::Payload::{Text, Binary, Empty};
pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
use message::Message;
static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
pub trait WebSocketServer: Server {
// called when a web socket connection is successfully established.
//
// this can't block! leaving implementation to trait user, in case they
// want custom scheduling, client tracking, reconnect logic, etc.
//
// TODO: may want to send more info in, such as the connecting IP address?
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> ();
// XXX: this is mostly a copy of the serve_forever fn in the Server trait.
// rust-http needs some changes in order to avoid this duplication
fn ws_serve_forever(self) {
let config = self.get_config();
debug!("About to bind to {}", config.bind_address);
let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() {
Err(err) => {
error!("bind or listen failed :-(: {}", err);
return;
},
Ok(acceptor) => acceptor,
};
debug!("listening");
loop {
let stream = match acceptor.accept() {
Err(error) => {
debug!("accept failed: {}", error);
// Question: is this the correct thing to do? We should probably be more
// intelligent, for there are some accept failures that are likely to be
// permanent, such that continuing would be a very bad idea, such as
// ENOBUFS/ENOMEM; and some where it should just be ignored, e.g.
// ECONNABORTED. TODO.
continue;
},
Ok(socket) => socket,
};
let child_self = self.clone();
Thread::spawn(move || {
let mut stream = BufferedStream::new(stream);
debug!("accepted connection");
let mut successful_handshake = false;
loop { // A keep-alive loop, condition at end
let (request, err_status) = Request::load(&mut stream);
let close_connection = request.close_connection;
let mut response = ResponseWriter::new(&mut stream);
match err_status {
Ok(()) => {
successful_handshake = child_self.handle_possible_ws_request(request, &mut response);
// Ensure that we actually do send a response:
match response.try_write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
Err(status) => {
// Uh oh, it's a response that I as a server cannot cope with.
// No good user-agent should have caused this, so for the moment
// at least I am content to send no body in the response.
response.status = status;
response.headers.content_length = Some(0);
match response.write_headers() {
Err(err) => {
error!("Writing headers failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
},
}
// Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() {
Err(err) => {
error!("finishing response failed: {}", err);
return; // Presumably bad connection, so give up.
},
Ok(_) => (),
}
if successful_handshake || close_connection {
break;
}
}
if successful_handshake {
child_self.serve_websockets(stream).unwrap();
}
}).detach();
}
}
fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> {
let mut stream = stream.wrapped;
let write_stream = stream.clone();
let (in_sender, in_receiver) = channel();
let (out_sender, out_receiver) = channel();
self.handle_ws_connect(in_receiver, out_sender);
// write task
Thread::spawn(move || {
// ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44
// work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958
let mut write_stream = write_stream;
loop {
let message = out_receiver.recv().unwrap();
message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task
}
}).detach();
// read task, effectively the parent of the write task
loop {
let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error.
debug!("message: {}", message);
match message.opcode {
CloseOp => {
try!(stream.close_read());
try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client
try!(stream.close_write());
break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any
},
PingOp => {
let pong = Message {
payload: message.payload,
opcode: PongOp
};
try!(pong.send(&mut stream));
},
PongOp => (),
_ => in_sender.send(message).unwrap()
}
}
Ok(())
}
fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String {
// NOTE from RFC 6455
//
// To prove that the handshake was received, the server has to take two
// pieces of information and combine them to form a response. The first
// piece of information comes from the |Sec-WebSocket-Key| header field
// in the client handshake:
//
// Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
//
// For this header field, the server has to take the value (as present
// in the header field, e.g., the base64-encoded [RFC4648] version minus
// any leading and trailing whitespace) and concatenate this with the
// Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA-
// 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by
// network endpoints that do not understand the WebSocket Protocol. A
// SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of
// [RFC4648]), of this concatenation is then returned in the server's
// handshake.
let mut sh = Sha1::new();
let mut out = [0u8; 20];
sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice());
sh.result(out.as_mut_slice());
return out.to_base64(STANDARD);
}
// check if the http request is a web socket upgrade request, and return true if so.
// otherwise, fall back on the regular http request handler
fn | (&self, r: Request, w: &mut ResponseWriter) -> bool {
// TODO allow configuration of endpoint for websocket
match (r.method.clone(), r.headers.upgrade.clone()){
// (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. but client must have the header "Connection: Upgrade"
(Get, Some(ref upgrade)) => {
if !upgrade.as_slice().eq_ignore_ascii_case("websocket"){
self.handle_request(r, w);
return false;
}
// TODO client must have the header "Connection: Upgrade"
//
// TODO The request MUST include a header field with the name
// |Sec-WebSocket-Version|. The value of this header field MUST be 13.
// WebSocket Opening Handshake
w.status = SwitchingProtocols;
w.headers.upgrade = Some(String::from_str("websocket"));
// w.headers.transfer_encoding = None;
w.headers.content_length = Some(0);
w.headers.connection = Some(vec!(Token(String::from_str("Upgrade"))));
w.headers.date = Some(time::now_utc());
w.headers.server = Some(String::from_str("rust-ws/0.1-pre"));
for header in r.headers.iter() {
debug!("Header {}: {}", header.header_name(), header.header_value());
}
// NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names
match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) {
Some(val) => {
let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice());
w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept));
},
None => panic!()
}
return true; // successful_handshake
},
(_, _) => self.handle_request(r, w)
}
return false;
}
}
| handle_possible_ws_request | identifier_name |
site.js | //Alert variables
const alertMessage = document.getElementById("alert-message");
let alertMessageText = "<p><strong>Alert: </strong>Here is the medium length sentence long alert popup up purple bar.</p><i class='alert-clear fa fa-times'></i>";
const alertText = document.getElementsByClassName("alert-text");
const clearAlert = document.getElementsByClassName("alert-clear");
//chart variables
const trafficOverview = document.getElementById('TrafficOverview').getContext('2d');
const trafficOverviewTime = document.getElementsByClassName("traffic-time-context");
const trafficSummary = document.getElementById('TrafficSummary').getContext('2d');
const mobileUsers = document.getElementById('MobileUsers').getContext('2d');
//settings variables
const toggleContainer = document.getElementsByClassName('toggle-switch');
const toggleText = document.getElementsByClassName('toggle-text');
const toggleButton = document.getElementsByClassName('toggle-button');
//notification variables
const liveNotification = document.querySelector('.liveNotification');
const notificationBell = document.querySelector('.bell');
const dropDown = document.querySelector('.dropdown-container');
const notificationClear = document.getElementsByClassName('notification-clear');
const dropDownHeader = document.querySelector(".dropdown-header");
//search variables
const search = document.getElementById("search");
const userName = document.getElementsByClassName("user-name");
const searchList = document.getElementById("searchList");
const listedUser = document.getElementsByClassName('listedUser');
//form variables
const formSubmit = document.getElementById("form-submit");
const messageText = document.getElementById("message-text");
const userError = document.getElementById("userError");
const messageError = document.getElementById("messageError");
let count = -1;
let data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
let labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
let notificationText = ["This is a new notification.",
"You have 6 unread messages.",
"You have 3 new followers.",
"Your password expires in 7 days."];
///////////////////////////////
//File performance
// var t0 = performance.now();
// var result = instantiatePage();
// var t1 = performance.now();
// console.log('Took', (t1 - t0).toFixed(4), 'milliseconds to generate:', result);
instantiatePage();
//Instantiate listeners, constructors
function instantiatePage(){
document.addEventListener("DOMContentLoaded", () => {
displayAlert(alertMessageText, 'general');
addAlertListener();
addTrafficTimeListener();
toggleSwitch();
addNotification(notificationText);
notificationListener();
globalClickListener();
deleteNotification();
globalKeyListener();
formListener();
setToggle();
addMessageListener();
//create array from user elements
let userArray = createArray(userName);
addSearchListener(userArray);
}); // end DOMContentLoaded
}
////////////////////////////////////////////////////////////////////
//global listener to close notifications when clicking elsewhere
function globalClickListener(){
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function
function globalKeyListener(){
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//compute the notification container size and position
function sizeNotificationContainer(){
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
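// Worked example (illustrative): with a 300px-wide dropdown and a 40px-wide
// bell on a window wider than 500px, width = 300 - 40 = 260, so the dropdown
// gets transform "translate(-260px, 40px)" and its right edge lines up under
// the bell.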
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
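//For illustration only (hypothetical labels/values), swapping out the chart's data
//with the two helpers above, assuming the trafficChart instance defined earlier:
//  removeData(trafficChart);
//  addData(trafficChart, ['Mon', 'Tue', 'Wed'], [120, 150, 90]);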
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
        barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off") |
});
}
}
function toggleOff(i){
toggleButton[i].style.transform = "translateX(-43px)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "Off";
toggleText[i].style.transform = "translateX(25px)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#a8aad7";
toggleContainer[i].style.transition = ".25s";
}
function toggleOn(i){
toggleButton[i].style.transform = "translateX(0)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "On";
toggleText[i].style.transform = "translateX(0)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#7377bf";
toggleContainer[i].style.transition = ".25s";
}
function setToggle(){
for(let i = 0; i < toggleContainer.length; i++){
let togglePosition = localStorage.getItem("toggle" + i);
if(togglePosition === "on"){
toggleOn(i);
}
else if(togglePosition === "off"){
toggleOff(i);
}
}
}
///////////////////////////////////////////////////////////////
//form
function formListener(){
formSubmit.addEventListener('click', (e) => {
e.preventDefault();
if(search.value.length === 0){
userError.style.display = "inline-block";
}
if(messageText.value.length === 0){
messageError.style.display = "inline-block";
}
if(search.value.length > 0 && messageText.value.length > 0 ){
submitForm();
}
});
}
function submitForm(){
let user = search.value;
alertMessageText = "<p><strong>Alert: </strong>Message sent successfully to <span class='capitalize'>" + user + "</span>.</p><i class='alert-clear fa fa-times'></i>";
displayAlert(alertMessageText, 'success');
addAlertListener();
alertMessage.scrollIntoView({behavior: "smooth", block: "start" });
messageError.style.display = "none";
userError.style.display = "none";
search.value = "";
messageText.value = "";
}
function addMessageListener(){
messageText.addEventListener("keyup", () =>{
if(messageError.style.display === "inline-block" && messageText.value.length > 0){
messageError.style.display = "none";
}
});
}
//Search/////////////////////////////////////////////////////////////////
//add listener to search bar
function addSearchListener(users){
search.addEventListener("keyup", function(e) {
if(userError.style.display === "inline-block" && search.value.length > 0){
userError.style.display = "none";
}
//clear existing search results
if(e.keyCode !== 38 && e.keyCode !== 40 && e.keyCode !== 9){
clearSearch();
let searchString = search.value.toLowerCase();
users.forEach((user) => {
let userString = user.toLowerCase();
if(userString.includes(searchString) && searchString.length > 0){
          //regular expression to wrap every match of the search string in <strong> tags
let regEx = new RegExp(searchString, "g");
let newString = "<strong>" + searchString + "</strong>";
let highlightedUser = userString.replace(regEx, newString);
let listedUser = document.createElement("p");
// listedUser.tabIndex = 0;
listedUser.className = "listedUser";
listedUser.innerHTML = highlightedUser;
searchList.appendChild(listedUser);
listedUser.addEventListener("click", (e) =>{
search.value = listedUser.textContent;
search.style.textTransform = "capitalize";
});
} //end if statement
}); //end forEach
}
}); //end listener
} //end function
function clearSearch(){
while(searchList.firstChild){
searchList.removeChild(searchList.firstChild);
}
}
//create array
function createArray(list){
let array = [];
for(let i = 0; i < list.length; i++){
let item = list[i].textContent;
array.push(item);
}
return array;
}
| {
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
} | conditional_block |
site.js | //Alert variables
const alertMessage = document.getElementById("alert-message");
let alertMessageText = "<p><strong>Alert: </strong>Here is the medium length sentence long alert popup up purple bar.</p><i class='alert-clear fa fa-times'></i>";
const alertText = document.getElementsByClassName("alert-text");
const clearAlert = document.getElementsByClassName("alert-clear");
//chart variables
const trafficOverview = document.getElementById('TrafficOverview').getContext('2d');
const trafficOverviewTime = document.getElementsByClassName("traffic-time-context");
const trafficSummary = document.getElementById('TrafficSummary').getContext('2d');
const mobileUsers = document.getElementById('MobileUsers').getContext('2d');
//settings variables
const toggleContainer = document.getElementsByClassName('toggle-switch');
const toggleText = document.getElementsByClassName('toggle-text');
const toggleButton = document.getElementsByClassName('toggle-button');
//notification variables
const liveNotification = document.querySelector('.liveNotification');
const notificationBell = document.querySelector('.bell');
const dropDown = document.querySelector('.dropdown-container');
const notificationClear = document.getElementsByClassName('notification-clear');
const dropDownHeader = document.querySelector(".dropdown-header");
//search variables
const search = document.getElementById("search");
const userName = document.getElementsByClassName("user-name");
const searchList = document.getElementById("searchList");
const listedUser = document.getElementsByClassName('listedUser');
//form variables
const formSubmit = document.getElementById("form-submit");
const messageText = document.getElementById("message-text");
const userError = document.getElementById("userError");
const messageError = document.getElementById("messageError");
let count = -1;
let data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
let labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
let notificationText = ["This is a new notification.",
"You have 6 unread messages.",
"You have 3 new followers.",
"Your password expires in 7 days."];
///////////////////////////////
//File performance
// var t0 = performance.now();
// var result = instantiatePage();
// var t1 = performance.now();
// console.log('Took', (t1 - t0).toFixed(4), 'milliseconds to generate:', result);
instantiatePage();
//Instantiate listeners, constructors
function instantiatePage(){
document.addEventListener("DOMContentLoaded", () => {
displayAlert(alertMessageText, 'general');
addAlertListener();
addTrafficTimeListener();
toggleSwitch();
addNotification(notificationText);
notificationListener();
globalClickListener();
deleteNotification();
globalKeyListener();
formListener();
setToggle();
addMessageListener();
//create array from user elements
let userArray = createArray(userName);
addSearchListener(userArray);
}); // end DOMContentLoaded
}
////////////////////////////////////////////////////////////////////
//global listener to click off notifications
function globalClickListener(){
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function
function globalKeyListener(){
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//figure notification container size
function sizeNotificationContainer(){
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
        barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off"){
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
}
});
}
}
function toggleOff(i){
toggleButton[i].style.transform = "translateX(-43px)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "Off";
toggleText[i].style.transform = "translateX(25px)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#a8aad7";
toggleContainer[i].style.transition = ".25s";
}
function | (i){
toggleButton[i].style.transform = "translateX(0)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "On";
toggleText[i].style.transform = "translateX(0)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#7377bf";
toggleContainer[i].style.transition = ".25s";
}
function setToggle(){
for(let i = 0; i < toggleContainer.length; i++){
let togglePosition = localStorage.getItem("toggle" + i);
if(togglePosition === "on"){
toggleOn(i);
}
else if(togglePosition === "off"){
toggleOff(i);
}
}
}
///////////////////////////////////////////////////////////////
//form
function formListener(){
formSubmit.addEventListener('click', (e) => {
e.preventDefault();
if(search.value.length === 0){
userError.style.display = "inline-block";
}
if(messageText.value.length === 0){
messageError.style.display = "inline-block";
}
if(search.value.length > 0 && messageText.value.length > 0 ){
submitForm();
}
});
}
function submitForm(){
let user = search.value;
alertMessageText = "<p><strong>Alert: </strong>Message sent successfully to <span class='capitalize'>" + user + "</span>.</p><i class='alert-clear fa fa-times'></i>";
displayAlert(alertMessageText, 'success');
addAlertListener();
alertMessage.scrollIntoView({behavior: "smooth", block: "start" });
messageError.style.display = "none";
userError.style.display = "none";
search.value = "";
messageText.value = "";
}
function addMessageListener(){
messageText.addEventListener("keyup", () =>{
if(messageError.style.display === "inline-block" && messageText.value.length > 0){
messageError.style.display = "none";
}
});
}
//Search/////////////////////////////////////////////////////////////////
//add listener to search bar
function addSearchListener(users){
search.addEventListener("keyup", function(e) {
if(userError.style.display === "inline-block" && search.value.length > 0){
userError.style.display = "none";
}
//clear existing search results
if(e.keyCode !== 38 && e.keyCode !== 40 && e.keyCode !== 9){
clearSearch();
let searchString = search.value.toLowerCase();
users.forEach((user) => {
let userString = user.toLowerCase();
if(userString.includes(searchString) && searchString.length > 0){
          //regular expression to wrap every match of the search string in <strong> tags
let regEx = new RegExp(searchString, "g");
let newString = "<strong>" + searchString + "</strong>";
let highlightedUser = userString.replace(regEx, newString);
let listedUser = document.createElement("p");
// listedUser.tabIndex = 0;
listedUser.className = "listedUser";
listedUser.innerHTML = highlightedUser;
searchList.appendChild(listedUser);
listedUser.addEventListener("click", (e) =>{
search.value = listedUser.textContent;
search.style.textTransform = "capitalize";
});
} //end if statement
}); //end forEach
}
}); //end listener
} //end function
function clearSearch(){
while(searchList.firstChild){
searchList.removeChild(searchList.firstChild);
}
}
//create array
function createArray(list){
let array = [];
for(let i = 0; i < list.length; i++){
let item = list[i].textContent;
array.push(item);
}
return array;
}
| toggleOn | identifier_name |
site.js | //Alert variables
const alertMessage = document.getElementById("alert-message");
let alertMessageText = "<p><strong>Alert: </strong>Here is the medium length sentence long alert popup up purple bar.</p><i class='alert-clear fa fa-times'></i>";
const alertText = document.getElementsByClassName("alert-text");
const clearAlert = document.getElementsByClassName("alert-clear");
//chart variables
const trafficOverview = document.getElementById('TrafficOverview').getContext('2d');
const trafficOverviewTime = document.getElementsByClassName("traffic-time-context");
const trafficSummary = document.getElementById('TrafficSummary').getContext('2d');
const mobileUsers = document.getElementById('MobileUsers').getContext('2d');
//settings variables
const toggleContainer = document.getElementsByClassName('toggle-switch');
const toggleText = document.getElementsByClassName('toggle-text');
const toggleButton = document.getElementsByClassName('toggle-button');
//notification variables
const liveNotification = document.querySelector('.liveNotification');
const notificationBell = document.querySelector('.bell');
const dropDown = document.querySelector('.dropdown-container');
const notificationClear = document.getElementsByClassName('notification-clear');
const dropDownHeader = document.querySelector(".dropdown-header");
//search variables
const search = document.getElementById("search");
const userName = document.getElementsByClassName("user-name");
const searchList = document.getElementById("searchList");
const listedUser = document.getElementsByClassName('listedUser');
//form variables
const formSubmit = document.getElementById("form-submit");
const messageText = document.getElementById("message-text");
const userError = document.getElementById("userError");
const messageError = document.getElementById("messageError");
let count = -1;
let data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
let labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
let notificationText = ["This is a new notification.",
"You have 6 unread messages.",
"You have 3 new followers.",
"Your password expires in 7 days."];
| // var result = instantiatePage();
// var t1 = performance.now();
// console.log('Took', (t1 - t0).toFixed(4), 'milliseconds to generate:', result);
instantiatePage();
//Instantiate listeners, constructors
function instantiatePage(){
document.addEventListener("DOMContentLoaded", () => {
displayAlert(alertMessageText, 'general');
addAlertListener();
addTrafficTimeListener();
toggleSwitch();
addNotification(notificationText);
notificationListener();
globalClickListener();
deleteNotification();
globalKeyListener();
formListener();
setToggle();
addMessageListener();
//create array from user elements
let userArray = createArray(userName);
addSearchListener(userArray);
}); // end DOMContentLoaded
}
////////////////////////////////////////////////////////////////////
//global listener to click off notifications
function globalClickListener(){
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function
function globalKeyListener(){
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//figure notification container size
function sizeNotificationContainer(){
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
        barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off"){
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
}
});
}
}
function toggleOff(i){
toggleButton[i].style.transform = "translateX(-43px)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "Off";
toggleText[i].style.transform = "translateX(25px)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#a8aad7";
toggleContainer[i].style.transition = ".25s";
}
function toggleOn(i){
toggleButton[i].style.transform = "translateX(0)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "On";
toggleText[i].style.transform = "translateX(0)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#7377bf";
toggleContainer[i].style.transition = ".25s";
}
function setToggle(){
for(let i = 0; i < toggleContainer.length; i++){
let togglePosition = localStorage.getItem("toggle" + i);
if(togglePosition === "on"){
toggleOn(i);
}
else if(togglePosition === "off"){
toggleOff(i);
}
}
}
///////////////////////////////////////////////////////////////
//form
function formListener(){
formSubmit.addEventListener('click', (e) => {
e.preventDefault();
if(search.value.length === 0){
userError.style.display = "inline-block";
}
if(messageText.value.length === 0){
messageError.style.display = "inline-block";
}
if(search.value.length > 0 && messageText.value.length > 0 ){
submitForm();
}
});
}
function submitForm(){
let user = search.value;
alertMessageText = "<p><strong>Alert: </strong>Message sent successfully to <span class='capitalize'>" + user + "</span>.</p><i class='alert-clear fa fa-times'></i>";
displayAlert(alertMessageText, 'success');
addAlertListener();
alertMessage.scrollIntoView({behavior: "smooth", block: "start" });
messageError.style.display = "none";
userError.style.display = "none";
search.value = "";
messageText.value = "";
}
function addMessageListener(){
messageText.addEventListener("keyup", () =>{
if(messageError.style.display === "inline-block" && messageText.value.length > 0){
messageError.style.display = "none";
}
});
}
//Search/////////////////////////////////////////////////////////////////
//add listener to search bar
function addSearchListener(users){
search.addEventListener("keyup", function(e) {
if(userError.style.display === "inline-block" && search.value.length > 0){
userError.style.display = "none";
}
//clear existing search results
if(e.keyCode !== 38 && e.keyCode !== 40 && e.keyCode !== 9){
clearSearch();
let searchString = search.value.toLowerCase();
users.forEach((user) => {
let userString = user.toLowerCase();
if(userString.includes(searchString) && searchString.length > 0){
          //regular expression to wrap every match of the search string in <strong> tags
let regEx = new RegExp(searchString, "g");
let newString = "<strong>" + searchString + "</strong>";
let highlightedUser = userString.replace(regEx, newString);
let listedUser = document.createElement("p");
// listedUser.tabIndex = 0;
listedUser.className = "listedUser";
listedUser.innerHTML = highlightedUser;
searchList.appendChild(listedUser);
listedUser.addEventListener("click", (e) =>{
search.value = listedUser.textContent;
search.style.textTransform = "capitalize";
});
} //end if statement
}); //end forEach
}
}); //end listener
} //end function
function clearSearch(){
while(searchList.firstChild){
searchList.removeChild(searchList.firstChild);
}
}
//create array
function createArray(list){
let array = [];
for(let i = 0; i < list.length; i++){
let item = list[i].textContent;
array.push(item);
}
return array;
} | ///////////////////////////////
//File performance
// var t0 = performance.now(); | random_line_split |
site.js | //Alert variables
const alertMessage = document.getElementById("alert-message");
let alertMessageText = "<p><strong>Alert: </strong>Here is the medium length sentence long alert popup up purple bar.</p><i class='alert-clear fa fa-times'></i>";
const alertText = document.getElementsByClassName("alert-text");
const clearAlert = document.getElementsByClassName("alert-clear");
//chart variables
const trafficOverview = document.getElementById('TrafficOverview').getContext('2d');
const trafficOverviewTime = document.getElementsByClassName("traffic-time-context");
const trafficSummary = document.getElementById('TrafficSummary').getContext('2d');
const mobileUsers = document.getElementById('MobileUsers').getContext('2d');
//settings variables
const toggleContainer = document.getElementsByClassName('toggle-switch');
const toggleText = document.getElementsByClassName('toggle-text');
const toggleButton = document.getElementsByClassName('toggle-button');
//notification variables
const liveNotification = document.querySelector('.liveNotification');
const notificationBell = document.querySelector('.bell');
const dropDown = document.querySelector('.dropdown-container');
const notificationClear = document.getElementsByClassName('notification-clear');
const dropDownHeader = document.querySelector(".dropdown-header");
//search variables
const search = document.getElementById("search");
const userName = document.getElementsByClassName("user-name");
const searchList = document.getElementById("searchList");
const listedUser = document.getElementsByClassName('listedUser');
//form variables
const formSubmit = document.getElementById("form-submit");
const messageText = document.getElementById("message-text");
const userError = document.getElementById("userError");
const messageError = document.getElementById("messageError");
let count = -1;
let data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
let labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
let notificationText = ["This is a new notification.",
"You have 6 unread messages.",
"You have 3 new followers.",
"Your password expires in 7 days."];
///////////////////////////////
//File performance
// var t0 = performance.now();
// var result = instantiatePage();
// var t1 = performance.now();
// console.log('Took', (t1 - t0).toFixed(4), 'milliseconds to generate:', result);
instantiatePage();
//Instantiate listeners, constructors
function instantiatePage(){
document.addEventListener("DOMContentLoaded", () => {
displayAlert(alertMessageText, 'general');
addAlertListener();
addTrafficTimeListener();
toggleSwitch();
addNotification(notificationText);
notificationListener();
globalClickListener();
deleteNotification();
globalKeyListener();
formListener();
setToggle();
addMessageListener();
//create array from user elements
let userArray = createArray(userName);
addSearchListener(userArray);
}); // end DOMContentLoaded
}
////////////////////////////////////////////////////////////////////
//global listener to click off notifications
function globalClickListener() |
function globalKeyListener(){
search.addEventListener("keyup", (e) => {
if(!search.value){
count = -1;
}
//if user has typed and there are results
if(search.value && searchList.children){
search.style.textTransform = "capitalize";
//up arrow key
if(e.key === 'ArrowUp'){
if(count === -1){
count = -1;
}
else if(count === 0){
count = 0;
}
else{
count -= 1;
}
if(count > -1){
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
}
}
//down arrow key
else if(e.key === 'ArrowDown'){
if(count >= (listedUser.length - 1)){
count = listedUser.length - 1;
}
else {
count++;
}
listedUser[count].style.outline = '2px solid #4d4c72';
if(listedUser[count].nextSibling){
listedUser[count].nextSibling.style.outline = 'none';
}
if(listedUser[count].previousSibling){
listedUser[count].previousSibling.style.outline = 'none';
}
search.value = listedUser[count].textContent;
} //end else if
} // if
}); // end listener
}
////////////////////////////////////////////////////////////////////
//NOTIFICATIONS
//add eventlistener to delete Notifications
function deleteNotification(){
for(let i = 0; i < notificationClear.length; i++){
notificationClear[i].addEventListener("click", (e) => {
let notification = e.target.parentElement;
dropDown.removeChild(notification);
sizeNotificationContainer();
notificationHeader();
});
}
}
//add eventlistener to notification bell
function notificationListener() {
notificationBell.addEventListener("click", () => {
if(dropDown.style.display !== "block"){
dropDown.style.display = "block";
sizeNotificationContainer();
}
});
}
//figure notification container size
function sizeNotificationContainer(){
let width;
let translate;
if(window.innerWidth < 400){
dropDown.style.left = "5px";
dropDown.style.transform = "translateY(40px)";
} else if (window.innerWidth < 500){
width = (dropDown.offsetWidth - notificationBell.offsetWidth) / 2;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
} else {
width = dropDown.offsetWidth - notificationBell.offsetWidth;
translate = "translate(-" + width + "px, 40px)";
dropDown.style.transform = translate;
dropDown.style.transition = "transform .25s";
}
}
//adjust notification header text
function notificationHeader(){
let num = dropDown.children.length - 1;
dropDownHeader.textContent = "You have " + num + " notifications";
if(num > 0){
liveNotification.style.opacity = "1";
}
if(num === 0){
liveNotification.style.opacity = "0";
}
}
//add notifications to dropdown
function addNotification(messages) {
messages.forEach((message) => {
let notification = document.createElement("div");
notification.className = "dropdown-item";
notification.innerHTML = message +
"<i class='notification-clear fa fa-times'></i>";
dropDown.appendChild(notification);
notificationHeader();
});
}
///////////////////////////////////////////////////////////
//Alert Bar
//display purple alert bar
function displayAlert(text, type){
let message = document.createElement("div");
message.classList.add("alert-text");
message.classList.add("alert-" + type);
message.innerHTML = text;
alertMessage.appendChild(message);
}
//add listener to remove alert bar
function addAlertListener(){
for(let i = 0; i < clearAlert.length; i++){
clearAlert[i].addEventListener("click", (event) => {
let node = event.target;
let fullMessage = node.parentElement;
alertMessage.removeChild(fullMessage);
});
}
}
//////////////////////////////////////////////////////
//Traffic Overview
// function constructTrafficOverview(data, labels){
let trafficChart = new Chart(trafficOverview, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: 'Traffic',
data: data,
borderWidth: 1,
lineTension: 0,
backgroundColor: 'rgba(183,185,233,.5)',
borderColor:'rgba(183,185,233,1)',
pointRadius: 5,
pointBackgroundColor: 'white',
pointBorderColor: "#7478bf",
pointBorderWidth: 2,
spanGaps: true,
}],
},
options: {
animation: {
easing: 'linear'
},
responsive:true,
maintainAspectRatio: false,
scales: {
yAxes: [{
ticks: {
padding: 25,
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
ticks: {
padding: 15,
},
gridLines: {
tickMarkLength: 0,
}
}]
}, //end scales
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
}
} //end options
}); //End Traffic Overview
//}
//add new data to chart
function addData(chart, label, data) {
let newDataLength = data.length;
for(let i = 0; i < newDataLength; i++){
chart.data.datasets[0].data.push(data[i]);
chart.data.labels.push(label[i]);
}
chart.update();
}
//remove data from chart
function removeData(chart) {
let dataLength = chart.data.datasets[0].data.length;
for(let i = 0; i < dataLength; i++){
chart.data.datasets[0].data.pop();
chart.data.labels.pop();
}
chart.update();
}
//add event listener for traffic time
function addTrafficTimeListener(){
for(let i = 0; i < trafficOverviewTime.length; i++){
trafficOverviewTime[i].addEventListener("click", (e) => {
removeClass(trafficOverviewTime, "highlight");
removeData(trafficChart);
let event = e.target;
let time = event.textContent;
if(time === "Hourly"){
data = [500, 510, 525, 520, 517, 545, 550, 560, 555, 570 ];
labels = [ 'Aug 5th, 8:00', '9:00', '10:00', '11:00', '12:00', '1:00', '2:00', '3:00', '4:00', '5:00'];
addData(trafficChart, labels, data);
event.classList.add("highlight");
} else if (time === "Daily"){
data = [500, 630, 615, 680, 745, 715, 750 ];
labels = [ 'S (8/5)', 'M (8/6)', 'T (8/7)', 'W (8/8)', 'R (8/9)', 'F (8/10)', 'S (8/11)'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Weekly"){
data = [ 0,500 ,1000, 1500, 1250, 1750, 2000, 1500, 2000, 2500, 2250];
labels = ['16-22', '23-29', '30-5', '6-12', '13-19', '20-26', '27-3', '4-10', '11-17', '18-24', '25-31'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
} else if (time === "Monthly"){
data = [ 2000, 3100, 2400, 3200, 4500, 4900, 3700, 5100, 5500, 5000, 6100, 6250];
labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'];
event.classList.add("highlight");
addData(trafficChart, labels, data);
}
trafficChart.update();
}); //end event listener
} //end for loop
} //end function
function removeClass(array, CSSclass){
for(let i = 0; i < array.length; i++){
array[i].classList.remove(CSSclass);
}
}
///////////////////////////////////////////////
//Traffic Summary
var trafficSummaryChart = new Chart(trafficSummary, {
type: 'bar',
data: {
labels: ['S', 'M', 'T', 'W', 'T', 'F', 'S'],
datasets: [{
label: '',
data: [50, 75, 125, 100, 200, 175, 125],
backgroundColor: '#7377bf',
}]
},
options: {
responsive:true,
legend: {
display:false
},
layout: {
padding: {
left: 5,
right: 15,
top: 5,
bottom: 5
}
},
scales: {
yAxes: [{
ticks: {
beginAtZero: true,
suggestedMax: 250,
padding: 25
},
gridLines: {
tickMarkLength: 0,
}
}],
xAxes: [{
maxBarThickness: 35,
        barThickness: 'flex',
gridLines: {
tickMarkLength: 0,
},
ticks: {
padding: 15
}
}]
} //end scales
} //end options
});
////////////////////////////////////////////
//Mobile USERS
var mobileUsersChart = new Chart(mobileUsers, {
type: 'doughnut',
data: {
labels: ['Phones', 'Tablets', 'Desktop'],
datasets: [{
data: [25, 15 ,60],
backgroundColor: [
'#74b1bf',
'#81c98f',
'#7377bf',
],
}]
},
options: {
responsive:true,
legend: {
position: 'right',
labels: {
padding: 20,
boxWidth: 15,
fontSize: 16
}
},
} // end options
}); // end mobile users
///////////////////////////////////////////////////////////////////////////
//Toggle switches
function toggleSwitch() {
for(let i = 0; i < toggleContainer.length; i++){
toggleContainer[i].addEventListener("click", (e) => {
if(toggleText[i].textContent === "On"){
toggleOff(i);
localStorage.setItem('toggle' + i, 'off');
}
else if (toggleText[i].textContent === "Off"){
toggleOn(i);
localStorage.setItem('toggle' + i, 'on');
}
});
}
}
function toggleOff(i){
toggleButton[i].style.transform = "translateX(-43px)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "Off";
toggleText[i].style.transform = "translateX(25px)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#a8aad7";
toggleContainer[i].style.transition = ".25s";
}
function toggleOn(i){
toggleButton[i].style.transform = "translateX(0)";
toggleButton[i].style.transition = ".25s";
toggleText[i].textContent = "On";
toggleText[i].style.transform = "translateX(0)";
toggleText[i].style.transition = ".25s";
toggleContainer[i].style.backgroundColor = "#7377bf";
toggleContainer[i].style.transition = ".25s";
}
function setToggle(){
for(let i = 0; i < toggleContainer.length; i++){
let togglePosition = localStorage.getItem("toggle" + i);
if(togglePosition === "on"){
toggleOn(i);
}
else if(togglePosition === "off"){
toggleOff(i);
}
}
}
///////////////////////////////////////////////////////////////
//form
function formListener(){
formSubmit.addEventListener('click', (e) => {
e.preventDefault();
if(search.value.length === 0){
userError.style.display = "inline-block";
}
if(messageText.value.length === 0){
messageError.style.display = "inline-block";
}
if(search.value.length > 0 && messageText.value.length > 0 ){
submitForm();
}
});
}
function submitForm(){
let user = search.value;
alertMessageText = "<p><strong>Alert: </strong>Message sent successfully to <span class='capitalize'>" + user + "</span>.</p><i class='alert-clear fa fa-times'></i>";
displayAlert(alertMessageText, 'success');
addAlertListener();
alertMessage.scrollIntoView({behavior: "smooth", block: "start" });
messageError.style.display = "none";
userError.style.display = "none";
search.value = "";
messageText.value = "";
}
function addMessageListener(){
messageText.addEventListener("keyup", () =>{
if(messageError.style.display === "inline-block" && messageText.value.length > 0){
messageError.style.display = "none";
}
});
}
//Search/////////////////////////////////////////////////////////////////
//add listener to search bar
function addSearchListener(users){
search.addEventListener("keyup", function(e) {
if(userError.style.display === "inline-block" && search.value.length > 0){
userError.style.display = "none";
}
//clear existing search results
if(e.keyCode !== 38 && e.keyCode !== 40 && e.keyCode !== 9){
clearSearch();
let searchString = search.value.toLowerCase();
users.forEach((user) => {
let userString = user.toLowerCase();
if(userString.includes(searchString) && searchString.length > 0){
          //regular expression to wrap every match of the search string in <strong> tags
let regEx = new RegExp(searchString, "g");
let newString = "<strong>" + searchString + "</strong>";
let highlightedUser = userString.replace(regEx, newString);
let listedUser = document.createElement("p");
// listedUser.tabIndex = 0;
listedUser.className = "listedUser";
listedUser.innerHTML = highlightedUser;
searchList.appendChild(listedUser);
listedUser.addEventListener("click", (e) =>{
search.value = listedUser.textContent;
search.style.textTransform = "capitalize";
});
} //end if statement
}); //end forEach
}
}); //end listener
} //end function
function clearSearch(){
while(searchList.firstChild){
searchList.removeChild(searchList.firstChild);
}
}
//create array
function createArray(list){
let array = [];
for(let i = 0; i < list.length; i++){
let item = list[i].textContent;
array.push(item);
}
return array;
}
| {
document.addEventListener("click", (e) => {
if (dropDown.style.display === "block" &&
!(e.target.className.includes("bell") ||
e.target.parentElement.className.includes("dropdown-container") ||
e.target.className.includes("notification-clear"))) {
dropDown.style.display = "none";
dropDown.style.transform = "translate(0, 0)";
} // end if
//remove search with click
if(searchList.firstChild){
clearSearch();
}
}); //end eventlistener
} //end function | identifier_body |
generator.go | package parse
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/importer"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
// _ "golang.org/x/tools/go/gcimporter"
"go/types"
)
//The caller will send a function of this type to do all the actual
//modification of the target package
type GeneratorFunc func(typeName string, fields []Field, imports []Import)
// File holds a single parsed file and associated data.
type File struct {
pkg *Package // Package to which this file belongs.
file *ast.File // Parsed AST.
// These fields are reset for each type being generated.
typeName string // Name of the struct type.
	fields []Field // Accumulator for the struct's fields
imports []Import
}
// Field represents a struct field
type Field struct {
Name string // The name of the field.
TypeName string //string representation of the Go Type of the field
Tags map[string]StructTag
}
type StructTag struct {
Name string
Value string
}
type Import struct {
ImportedName string
}
type Package struct {
dir string
name string
defs map[*ast.Ident]types.Object
files []*File
typesPkg *types.Package
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
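//A minimal usage sketch (the "./models" path, "User" type name, and generator
//body below are hypothetical, shown for illustration only):
//
//	var g Generator
//	err := g.Run([]string{"./models"}, "User", "", func(typeName string, fields []Field, imports []Import) {
//		g.Printf("// %s has %d collected fields\n", typeName, len(fields))
//	})
//	if err != nil {
//		log.Fatal(err)
//	}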
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collect gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the struct type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a type name followed by its definition.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//We need to make sure that we get the type used right, with a package prefix
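//
//For illustration, the mapping below works out to (assumed examples, derived from the code):
//
//	processTypeStr("int64")                    -> ("int64", "")
//	processTypeStr("time.Time")                -> ("time.Time", "time")
//	processTypeStr("github.com/foo/bar.Thing") -> ("bar.Thing", "github.com/foo/bar")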
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.Buf, format, args...)
}
func (g *Generator) Print(output string) { | func (g *Generator) format() []byte {
//DEBUG: fmt.Print(g.Buf.String())
src, err := format.Source(g.Buf.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.Buf.Bytes()
}
return src
}
// check type-checks the package. The package must be OK to proceed.
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
errFn := func(err error) {
cErr := err.(types.Error)
if cErr.Soft {
return
}
if strings.Contains(cErr.Msg, "has no field or method") ||
strings.Contains(cErr.Msg, "invalid operation: cannot call non-function") ||
//2016-01-11: Try and skip past issues with VendorExperiment
strings.Contains(cErr.Msg, "vendor") {
log.Printf("IGNORED: during package check: %s", cErr.Msg)
return
}
log.Fatalf("checking package: %s", cErr.Msg)
}
config := types.Config{FakeImportC: true, Error: errFn, Importer: importer.ForCompiler(fs, "source", nil)}
info := &types.Info{
Defs: pkg.defs,
}
typesPkg, _ := config.Check(pkg.dir, fs, astFiles, info)
pkg.typesPkg = typesPkg
}
// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
info, err := os.Stat(name)
if err != nil {
log.Fatal(err)
}
return info.IsDir()
}
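//parseFieldTags hand-parses a raw struct tag literal into a map keyed by tag name.
//Illustrative example (hypothetical tag): an input literal of
//
//	`db:"id" exclude_dao:"true"`
//
//yields {"db": {Name: "db", Value: "id"}, "exclude_dao": {Name: "exclude_dao", Value: "true"}}.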
func parseFieldTags(tagString *ast.BasicLit) map[string]StructTag {
tagMap := make(map[string]StructTag)
if tagString != nil {
sanitized := strings.Replace(tagString.Value, "`", "", -1)
var buffer []byte = make([]byte, 0, 10)
var key string
var inTag bool
for i := 0; i < len(sanitized); i++ {
if sanitized[i] == ':' {
key = strings.TrimSpace(bytes.NewBuffer(buffer).String())
buffer = make([]byte, 0, 10)
continue
}
if sanitized[i] == '"' {
if inTag {
tagMap[key] = StructTag{Name: key, Value: strings.TrimSpace(bytes.NewBuffer(buffer).String())}
buffer, key = make([]byte, 0, 10), ""
//key = ""
inTag = false
continue
} else {
inTag = true
continue
}
}
buffer = append(buffer, sanitized[i])
}
}
return tagMap
} | fmt.Fprint(&g.Buf, output)
}
//format returns the gofmt-ed contents of the Generator's buffer. | random_line_split |
generator.go | package parse
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/importer"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
// _ "golang.org/x/tools/go/gcimporter"
"go/types"
)
//The caller will send a function of this type to do all the actual
//modification of the target package
type GeneratorFunc func(typeName string, fields []Field, imports []Import)
// File holds a single parsed file and associated data.
type File struct {
pkg *Package // Package to which this file belongs.
file *ast.File // Parsed AST.
// These fields are reset for each type being generated.
typeName string // Name of the struct type.
fields []Field // Accumulator for the struct's fields
imports []Import
}
// Field represents a struct field
type Field struct {
Name string // The name of the field.
TypeName string //string representation of the Go Type of the field
Tags map[string]StructTag
}
type StructTag struct {
Name string
Value string
}
type Import struct {
ImportedName string
}
type Package struct {
dir string
name string
defs map[*ast.Ident]types.Object
files []*File
typesPkg *types.Package
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collect gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
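//
// Illustrative only (hypothetical input): for a target type declared as
//
//	type User struct {
//		ID      string    `db:"id"`
//		Skipped time.Time `exclude_dao:"true"`
//	}
//
// genDecl records Field{Name: "ID", TypeName: "string"} with its db tag, and drops
// Skipped because of its exclude_dao tag (so its time import is never recorded).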
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the struct type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a type name followed by its definition.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//We need to make sure that we get the type used right, with a package prefix
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) |
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
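// For example, prefixDirectory("models", []string{"a.go", "b.go"}) returns
// []string{"models/a.go", "models/b.go"}; a directory of "." returns names unchanged.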
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.Buf, format, args...)
}
func (g *Generator) Print(output string) {
fmt.Fprint(&g.Buf, output)
}
//format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
//DEBUG: fmt.Print(g.Buf.String())
src, err := format.Source(g.Buf.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.Buf.Bytes()
}
return src
}
// check type-checks the package. The package must be OK to proceed.
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
errFn := func(err error) {
cErr := err.(types.Error)
if cErr.Soft {
return
}
if strings.Contains(cErr.Msg, "has no field or method") ||
strings.Contains(cErr.Msg, "invalid operation: cannot call non-function") ||
//2016-01-11: Try and skip past issues with VendorExperiment
strings.Contains(cErr.Msg, "vendor") {
log.Printf("IGNORED: during package check: %s", cErr.Msg)
return
}
log.Fatalf("checking package: %s", cErr.Msg)
}
config := types.Config{FakeImportC: true, Error: errFn, Importer: importer.ForCompiler(fs, "source", nil)}
info := &types.Info{
Defs: pkg.defs,
}
typesPkg, _ := config.Check(pkg.dir, fs, astFiles, info)
pkg.typesPkg = typesPkg
}
// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
info, err := os.Stat(name)
if err != nil {
log.Fatal(err)
}
return info.IsDir()
}
func parseFieldTags(tagString *ast.BasicLit) map[string]StructTag {
tagMap := make(map[string]StructTag)
if tagString != nil {
sanitized := strings.Replace(tagString.Value, "`", "", -1)
var buffer []byte = make([]byte, 0, 10)
var key string
var inTag bool
for i := 0; i < len(sanitized); i++ {
if sanitized[i] == ':' {
key = strings.TrimSpace(bytes.NewBuffer(buffer).String())
buffer = make([]byte, 0, 10)
continue
}
if sanitized[i] == '"' {
if inTag {
tagMap[key] = StructTag{Name: key, Value: strings.TrimSpace(bytes.NewBuffer(buffer).String())}
buffer, key = make([]byte, 0, 10), ""
//key = ""
inTag = false
continue
} else {
inTag = true
continue
}
}
buffer = append(buffer, sanitized[i])
}
}
return tagMap
}
| {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
} | identifier_body |
generator.go | package parse
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/importer"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
// _ "golang.org/x/tools/go/gcimporter"
"go/types"
)
//The caller will send a function of this type to do all the actual
//modification of the target package
type GeneratorFunc func(typeName string, fields []Field, imports []Import)
// File holds a single parsed file and associated data.
type File struct {
pkg *Package // Package to which this file belongs.
file *ast.File // Parsed AST.
// These fields are reset for each type being generated.
typeName string // Name of the struct type.
fields []Field // Accumulator for the struct's fields
imports []Import
}
// Field represents a struct field
type Field struct {
Name string // The name of the field.
TypeName string //string representation of the Go Type of the field
Tags map[string]StructTag
}
type StructTag struct {
Name string
Value string
}
type Import struct {
ImportedName string
}
type Package struct {
dir string
name string
defs map[*ast.Ident]types.Object
files []*File
typesPkg *types.Package
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collect gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the struct type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a type name followed by its definition.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" {
continue
}
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//We need to make sure that we get the type used right, with a package prefix
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func | (pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.Buf, format, args...)
}
func (g *Generator) Print(output string) {
fmt.Fprint(&g.Buf, output)
}
//format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
//DEBUG: fmt.Print(g.Buf.String())
src, err := format.Source(g.Buf.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.Buf.Bytes()
}
return src
}
// check type-checks the package. The package must be OK to proceed.
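// Soft type errors, "has no field or method", "invalid operation: cannot call
// non-function", and vendor-related messages are only logged so generation can
// proceed; any other type error aborts via log.Fatalf.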
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
errFn := func(err error) {
cErr := err.(types.Error)
if cErr.Soft {
return
}
if strings.Contains(cErr.Msg, "has no field or method") ||
strings.Contains(cErr.Msg, "invalid operation: cannot call non-function") ||
//2016-01-11: Try and skip past issues with VendorExperiment
strings.Contains(cErr.Msg, "vendor") {
log.Printf("IGNORED: during package check: %s", cErr.Msg)
return
}
log.Fatalf("checking package: %s", cErr.Msg)
}
config := types.Config{FakeImportC: true, Error: errFn, Importer: importer.ForCompiler(fs, "source", nil)}
info := &types.Info{
Defs: pkg.defs,
}
typesPkg, _ := config.Check(pkg.dir, fs, astFiles, info)
pkg.typesPkg = typesPkg
}
// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
info, err := os.Stat(name)
if err != nil {
log.Fatal(err)
}
return info.IsDir()
}
func parseFieldTags(tagString *ast.BasicLit) map[string]StructTag {
tagMap := make(map[string]StructTag)
if tagString != nil {
sanitized := strings.Replace(tagString.Value, "`", "", -1)
var buffer []byte = make([]byte, 0, 10)
var key string
var inTag bool
for i := 0; i < len(sanitized); i++ {
if sanitized[i] == ':' {
key = strings.TrimSpace(bytes.NewBuffer(buffer).String())
buffer = make([]byte, 0, 10)
continue
}
if sanitized[i] == '"' {
if inTag {
tagMap[key] = StructTag{Name: key, Value: strings.TrimSpace(bytes.NewBuffer(buffer).String())}
buffer, key = make([]byte, 0, 10), ""
//key = ""
inTag = false
continue
} else {
inTag = true
continue
}
}
buffer = append(buffer, sanitized[i])
}
}
return tagMap
}
| importExists | identifier_name |
generator.go | package parse
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/importer"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
// _ "golang.org/x/tools/go/gcimporter"
"go/types"
)
//The caller will send a function of this type to do all the actual
//modification of the target package
type GeneratorFunc func(typeName string, fields []Field, imports []Import)
// File holds a single parsed file and associated data.
type File struct {
pkg *Package // Package to which this file belongs.
file *ast.File // Parsed AST.
// These fields are reset for each type being generated.
typeName string // Name of the struct type.
fields []Field // Accumulator for the struct's fields
imports []Import
}
// Field represents a struct field
type Field struct {
Name string // The name of the field.
TypeName string //string representation of the Go Type of the field
Tags map[string]StructTag
}
type StructTag struct {
Name string
Value string
}
type Import struct {
ImportedName string
}
type Package struct {
dir string
name string
defs map[*ast.Ident]types.Object
files []*File
typesPkg *types.Package
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
Buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
dir string
}
//Run parses the target package and generates the code, verifying the package before and after generation.
//pathArgs is a list of file paths, to either individual files or whole directories.
//typeName is the name of a struct we're working on. outputName is where the generated code should go.
//genFn is the most important part, and receives all the meta info about the targeted Type
func (g *Generator) Run(pathArgs []string, typeName string, outputName string, genFn GeneratorFunc) error {
//Parse the package
g.Prepare(pathArgs)
// Print the header and package clause.
g.Printf("// Code generated by 'exemplar %s'; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.collectAndGenerate(typeName, genFn)
//format output
src := g.format()
// Write to file.
//TODO: Fix this to not be tied to propertizer
//DEBUG: fmt.Printf("Typename in parse: %s", typeName)
if outputName == "" {
baseName := fmt.Sprintf("%s_properties.go", typeName)
outputName = filepath.Join(g.dir, strings.ToLower(baseName))
}
fmt.Println(outputName)
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
return nil
}
func (g *Generator) Prepare(args []string) {
log.Printf("Prepare - checking if path is a directory or a list of files %s", args[0])
if len(args) == 1 && isDirectory(args[0]) {
log.Printf("Prepare - found directory")
g.dir = args[0]
g.parsePackageDir(args[0])
} else {
log.Printf("Prepare - found file list")
g.dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
}
//collect gathers all the info from all the package's files about the type
//necessary to do fun things
func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {
fields := make([]Field, 0, 100)
imports := make([]Import, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.fields = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
fields = append(fields, file.fields...)
imports = append(imports, file.imports...)
}
}
genFn(typeName, fields, imports)
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.TYPE { // We only care about Type declarations.
return true
}
// The name of the struct type we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a TypeSpec:
// a type name followed by its definition.
for _, spec := range decl.Specs {
tspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.
if tspec.Type != nil {
// "X T". We have a type. Remember it.
typ = tspec.Name.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
structType, ok := tspec.Type.(*ast.StructType)
if !ok {
//not a struct type
continue
}
typesObj, typeObjOk := f.pkg.defs[tspec.Name]
if !typeObjOk {
log.Fatalf("no type info found for struct %s", typ)
}
for _, fieldLine := range structType.Fields.List {
for _, field := range fieldLine.Names {
//skip struct padding
if field.Name == "_" |
fieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)
typeStr := fieldObj.Type().String()
tags := parseFieldTags(fieldLine.Tag)
//Skip here so we don't include rubbish import lines
if tags["exclude_dao"].Value == "true" {
continue
}
processedTypeStr, importPath := processTypeStr(typeStr)
//log.Printf("processedTypeStr: %s, importPath: %s", processedTypeStr, importPath)
if importPath != "" && !importExists(importPath, f.imports) {
f.imports = append(f.imports, Import{importPath})
}
v := Field{
Name: field.Name,
Tags: tags,
TypeName: processedTypeStr,
}
f.fields = append(f.fields, v)
}
}
}
return false
}
//We need to make sure that we get the type used right, with a package prefix
func processTypeStr(typeStr string) (typeName, importPath string) {
if strings.Contains(typeStr, "/") {
slashSplit := strings.Split(typeStr, "/")
pkgNameAndType := slashSplit[len(slashSplit)-1]
pkgName := strings.Split(pkgNameAndType, ".")[0]
importPath := fmt.Sprintf("%s/%s", strings.Join(slashSplit[0:len(slashSplit)-1], "/"), pkgName)
return pkgNameAndType, importPath
} else if strings.Contains(typeStr, ".") {
dotSplit := strings.Split(typeStr, ".")
importPath := dotSplit[0]
pkgNameAndType := typeStr
return pkgNameAndType, importPath
} else {
return typeStr, ""
}
}
//Check to see if a path already exists in the []Import
func importExists(pathName string, imports []Import) bool {
for _, val := range imports {
if pathName == val.ImportedName {
return true
}
}
return false
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
log.Printf("Collecting objects in package %s for parsing", directory)
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
log.Printf("Found object names: %+v", names)
g.parsePackage(directory, names, nil)
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
log.Printf("Parsing file: %s", name)
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.Buf, format, args...)
}
func (g *Generator) Print(output string) {
fmt.Fprint(&g.Buf, output)
}
//format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
//DEBUG: fmt.Print(g.Buf.String())
src, err := format.Source(g.Buf.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.Buf.Bytes()
}
return src
}
// check type-checks the package. The package must be OK to proceed.
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
errFn := func(err error) {
cErr := err.(types.Error)
if cErr.Soft {
return
}
if strings.Contains(cErr.Msg, "has no field or method") ||
strings.Contains(cErr.Msg, "invalid operation: cannot call non-function") ||
//2016-01-11: Try and skip past issues with VendorExperiment
strings.Contains(cErr.Msg, "vendor") {
log.Printf("IGNORED: during package check: %s", cErr.Msg)
return
}
log.Fatalf("checking package: %s", cErr.Msg)
}
config := types.Config{FakeImportC: true, Error: errFn, Importer: importer.ForCompiler(fs, "source", nil)}
info := &types.Info{
Defs: pkg.defs,
}
typesPkg, _ := config.Check(pkg.dir, fs, astFiles, info)
pkg.typesPkg = typesPkg
}
// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
info, err := os.Stat(name)
if err != nil {
log.Fatal(err)
}
return info.IsDir()
}
func parseFieldTags(tagString *ast.BasicLit) map[string]StructTag {
tagMap := make(map[string]StructTag)
if tagString != nil {
sanitized := strings.Replace(tagString.Value, "`", "", -1)
var buffer []byte = make([]byte, 0, 10)
var key string
var inTag bool
for i := 0; i < len(sanitized); i++ {
if sanitized[i] == ':' {
key = strings.TrimSpace(bytes.NewBuffer(buffer).String())
buffer = make([]byte, 0, 10)
continue
}
if sanitized[i] == '"' {
if inTag {
tagMap[key] = StructTag{Name: key, Value: strings.TrimSpace(bytes.NewBuffer(buffer).String())}
buffer, key = make([]byte, 0, 10), ""
//key = ""
inTag = false
continue
} else {
inTag = true
continue
}
}
buffer = append(buffer, sanitized[i])
}
}
return tagMap
}
| {
continue
} | conditional_block |
s3driver.go | package sftp
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var BLOCK_DOWNLOADS_IP_ADDRESSES []string
type S3 interface {
ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
CopyObject(input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
}
type S3Driver struct {
s3 S3
bucket string
prefix string
homePath string
remoteIPAddress string
kmsKeyID *string
lg Logger
}
func (d S3Driver) Stat(path string) (os.FileInfo, error) {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
resp, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(localPath),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
if resp.Contents == nil || *resp.KeyCount == 0 {
return nil, os.ErrNotExist
}
info := &fileInfo{
name: localPath,
mode: os.ModePerm,
size: *resp.Contents[0].Size,
mtime: *resp.Contents[0].LastModified,
}
if strings.HasSuffix(*resp.Contents[0].Key, "/") {
info.name = strings.TrimRight(info.name, "/")
info.mode = os.ModeDir
}
return info, nil
}
func (d S3Driver) ListDir(path string) ([]os.FileInfo, error) {
prefix, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix {
continue
}
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) DeleteDir(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error |
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
d.lg.ErrorD("s3-get-file-blocked-ip", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
}
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
obj, err := d.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
})
if err != nil {
return nil, err
}
if d.lg != nil {
d.lg.InfoD("s3-get-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": obj.ContentLength,
})
}
return obj.Body, nil
}
func (d S3Driver) PutFile(path string, r io.Reader) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
rawData, err := ioutil.ReadAll(r)
if err != nil {
return err
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader(rawData),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
if err != nil {
return err
}
ip, port := getIPAndPort(d.remoteIPAddress)
if d.lg != nil {
d.lg.InfoD("s3-put-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "PUT",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": bytes.NewReader(rawData).Size(),
})
}
return nil
}
func (d S3Driver) RealPath(path string) string {
result, _ := TranslatePath("/", d.homePath, path)
return "/" + result
}
// TranslatePath takes in an S3 root prefix, a home directory, and either an absolute or relative path to append, and returns a cleaned and validated path.
// It will resolve things like '..' while disallowing the prefix to be escaped.
// It also preserves a single trailing slash if one is present, so it can be used on both directories and files.
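// For illustration (assumed behavior, derived from the logic below):
//
//	TranslatePath("district1", "home", "reports/")   -> "district1/home/reports/"
//	TranslatePath("district1", "home", "/../../etc") -> "district1"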
func TranslatePath(prefix, home, path string) (string, error) {
if path == "" {
return filepath.Clean("/" + prefix + "/" + home), nil
}
var cleanPath string
if strings.HasPrefix(path, "/") {
cleanPath = filepath.Clean(prefix + path)
if !strings.HasPrefix(cleanPath, prefix) {
cleanPath = prefix
}
} else {
cleanPath = filepath.Clean("/" + prefix + "/" + home + filepath.Clean("/"+path))
}
// filepath.Clean drops trailing slashes, so if there was one we have to put it back
if strings.HasSuffix(path, "/") {
cleanPath += "/"
}
return strings.TrimLeft(cleanPath, "/"), nil
}
// NewS3Driver creates a new S3Driver with the AWS credentials and S3 parameters.
// bucket: name of S3 bucket
// prefix: key within the S3 bucket, if applicable
// homePath: default home directory for user (can be different from prefix)
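// kmsKeyID: optional KMS key ID; when nil, objects are written with AES256 server-side encryption
//
// Hypothetical construction (placeholder values, not real credentials):
//
//	driver := NewS3Driver("sftp-bucket", "district1", "home", "us-east-1",
//		"AKIA_EXAMPLE", "SECRET_EXAMPLE", "", "203.0.113.7:52000", nil, myLogger)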
func NewS3Driver(
bucket,
prefix,
homePath,
region,
awsAccessKeyID,
awsSecretKey,
awsToken,
remoteIPAddress string,
kmsKeyID *string,
lg Logger,
) *S3Driver {
config := aws.NewConfig().
WithRegion(region).
WithCredentials(credentials.NewStaticCredentials(awsAccessKeyID, awsSecretKey, awsToken))
s3 := s3.New(session.New(), config)
blockDownloadIPAddressesStr := os.Getenv("BLOCK_DOWNLOADS_IP_ADDRESSES")
BLOCK_DOWNLOADS_IP_ADDRESSES = []string{}
for _, addr := range strings.Split(blockDownloadIPAddressesStr, ",") {
BLOCK_DOWNLOADS_IP_ADDRESSES = append(BLOCK_DOWNLOADS_IP_ADDRESSES, strings.TrimSpace(addr))
}
return &S3Driver{
s3: s3,
bucket: bucket,
prefix: prefix,
homePath: homePath,
remoteIPAddress: remoteIPAddress,
kmsKeyID: kmsKeyID,
lg: lg,
}
}
func getIPAndPort(combined string) (string, string) {
urlArray := strings.Split(combined, ":")
ip := ""
port := ""
if len(urlArray) > 0 {
ip = urlArray[0]
}
if len(urlArray) > 1 {
port = urlArray[1]
}
return ip, port
}
| {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
} | identifier_body |
s3driver.go | package sftp
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var BLOCK_DOWNLOADS_IP_ADDRESSES []string
type S3 interface {
ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
CopyObject(input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
}
type S3Driver struct {
s3 S3
bucket string
prefix string
homePath string
remoteIPAddress string
kmsKeyID *string
lg Logger
}
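// Stat reports metadata for the translated key by listing at most one object;
// a key ending in "/" is reported as a directory, anything else as a regular file.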
func (d S3Driver) Stat(path string) (os.FileInfo, error) {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
resp, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(localPath),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
if resp.Contents == nil || *resp.KeyCount == 0 {
return nil, os.ErrNotExist
}
info := &fileInfo{
name: localPath,
mode: os.ModePerm,
size: *resp.Contents[0].Size,
mtime: *resp.Contents[0].LastModified,
}
if strings.HasSuffix(*resp.Contents[0].Key, "/") {
info.name = strings.TrimRight(info.name, "/")
info.mode = os.ModeDir
}
return info, nil
}
func (d S3Driver) ListDir(path string) ([]os.FileInfo, error) {
prefix, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix {
continue
}
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) DeleteDir(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
}
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
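// MakeDir creates a "directory" by writing a zero-byte object whose key ends in "/",
// the usual S3 convention for folder placeholders.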
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
d.lg.ErrorD("s3-get-file-blocked-ip", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
}
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
obj, err := d.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
})
if err != nil {
return nil, err
}
if d.lg != nil {
d.lg.InfoD("s3-get-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": obj.ContentLength,
})
}
return obj.Body, nil
}
func (d S3Driver) PutFile(path string, r io.Reader) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
rawData, err := ioutil.ReadAll(r)
if err != nil {
return err
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader(rawData),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
if err != nil {
return err
}
ip, port := getIPAndPort(d.remoteIPAddress)
if d.lg != nil {
d.lg.InfoD("s3-put-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "PUT",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": bytes.NewReader(rawData).Size(),
})
}
return nil
}
func (d S3Driver) RealPath(path string) string {
result, _ := TranslatePath("/", d.homePath, path)
return "/" + result
}
// TranslatePath takes an S3 root prefix, a home directory, and an absolute or relative path to append, and returns a cleaned and validated path.
// It will resolve things like '..' while disallowing the prefix to be escaped.
// It also preserves a single trailing slash if one is present, so it can be used on both directories and files.
func TranslatePath(prefix, home, path string) (string, error) {
if path == "" {
return filepath.Clean("/" + prefix + "/" + home), nil
}
var cleanPath string
if strings.HasPrefix(path, "/") {
cleanPath = filepath.Clean(prefix + path)
if !strings.HasPrefix(cleanPath, prefix) {
cleanPath = prefix
}
} else {
cleanPath = filepath.Clean("/" + prefix + "/" + home + filepath.Clean("/"+path))
}
// filepath.Clean drops trailing slashes, so put one back if the original path had it
if strings.HasSuffix(path, "/") {
cleanPath += "/"
}
return strings.TrimLeft(cleanPath, "/"), nil
}
// NewS3Driver creates a new S3Driver with the AWS credentials and S3 parameters.
// bucket: name of S3 bucket
// prefix: key within the S3 bucket, if applicable
// homePath: default home directory for user (can be different from prefix)
func NewS3Driver(
bucket,
prefix,
homePath,
region,
awsAccessKeyID,
awsSecretKey,
awsToken,
remoteIPAddress string,
kmsKeyID *string,
lg Logger,
) *S3Driver {
config := aws.NewConfig().
WithRegion(region).
WithCredentials(credentials.NewStaticCredentials(awsAccessKeyID, awsSecretKey, awsToken))
s3 := s3.New(session.New(), config)
blockDownloadIPAddressesStr := os.Getenv("BLOCK_DOWNLOADS_IP_ADDRESSES")
BLOCK_DOWNLOADS_IP_ADDRESSES = []string{}
for _, addr := range strings.Split(blockDownloadIPAddressesStr, ",") {
// skip empty entries so an unset env var does not add "" to the block list
if trimmed := strings.TrimSpace(addr); trimmed != "" {
BLOCK_DOWNLOADS_IP_ADDRESSES = append(BLOCK_DOWNLOADS_IP_ADDRESSES, trimmed)
}
}
return &S3Driver{
s3: s3,
bucket: bucket,
prefix: prefix,
homePath: homePath,
remoteIPAddress: remoteIPAddress,
kmsKeyID: kmsKeyID,
lg: lg,
}
}
| urlArray := strings.Split(combined, ":")
ip := ""
port := ""
if len(urlArray) > 0 {
ip = urlArray[0]
}
if len(urlArray) > 1 {
port = urlArray[1]
}
return ip, port
} | func getIPAndPort(combined string) (string, string) { | random_line_split |
s3driver.go | package sftp
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var BLOCK_DOWNLOADS_IP_ADDRESSES []string
type S3 interface {
ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
CopyObject(input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
}
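// Illustrative sketch (assumed, not part of the original file): because S3Driver depends
// on this narrow interface instead of *s3.S3, tests can substitute a stub, e.g.:
//
//   type fakeS3 struct{ S3 }
//
//   func (fakeS3) GetObject(in *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
//       return &s3.GetObjectOutput{Body: ioutil.NopCloser(strings.NewReader("hello"))}, nil
//   }
//
// Any method not overridden would panic if called, which is usually acceptable in tests.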
type S3Driver struct {
s3 S3
bucket string
prefix string
homePath string
remoteIPAddress string
kmsKeyID *string
lg Logger
}
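// Descriptive note (inferred from the code below): Stat lists at most one object under the
// translated key to decide whether the path exists, and reports a key ending in "/" as a
// directory with the trailing slash trimmed from its name.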
func (d S3Driver) Stat(path string) (os.FileInfo, error) {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
resp, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(localPath),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
if resp.Contents == nil || *resp.KeyCount == 0 {
return nil, os.ErrNotExist
}
info := &fileInfo{
name: localPath,
mode: os.ModePerm,
size: *resp.Contents[0].Size,
mtime: *resp.Contents[0].LastModified,
}
if strings.HasSuffix(*resp.Contents[0].Key, "/") {
info.name = strings.TrimRight(info.name, "/")
info.mode = os.ModeDir
}
return info, nil
}
func (d S3Driver) ListDir(path string) ([]os.FileInfo, error) {
prefix, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix {
continue
}
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) | (path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
}
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
d.lg.ErrorD("s3-get-file-blocked-ip", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
}
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
obj, err := d.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
})
if err != nil {
return nil, err
}
if d.lg != nil {
d.lg.InfoD("s3-get-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": obj.ContentLength,
})
}
return obj.Body, nil
}
func (d S3Driver) PutFile(path string, r io.Reader) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
rawData, err := ioutil.ReadAll(r)
if err != nil {
return err
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader(rawData),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
if err != nil {
return err
}
ip, port := getIPAndPort(d.remoteIPAddress)
if d.lg != nil {
d.lg.InfoD("s3-put-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "PUT",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": bytes.NewReader(rawData).Size(),
})
}
return nil
}
func (d S3Driver) RealPath(path string) string {
result, _ := TranslatePath("/", d.homePath, path)
return "/" + result
}
// TranslatePath takes an S3 root prefix, a home directory, and an absolute or relative path to append, and returns a cleaned and validated path.
// It will resolve things like '..' while disallowing the prefix to be escaped.
// It also preserves a single trailing slash if one is present, so it can be used on both directories and files.
func TranslatePath(prefix, home, path string) (string, error) {
if path == "" {
return filepath.Clean("/" + prefix + "/" + home), nil
}
var cleanPath string
if strings.HasPrefix(path, "/") {
cleanPath = filepath.Clean(prefix + path)
if !strings.HasPrefix(cleanPath, prefix) {
cleanPath = prefix
}
} else {
cleanPath = filepath.Clean("/" + prefix + "/" + home + filepath.Clean("/"+path))
}
// filepath.Clean drops trailing slashes, so put one back if the original path had it
if strings.HasSuffix(path, "/") {
cleanPath += "/"
}
return strings.TrimLeft(cleanPath, "/"), nil
}
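// Illustrative examples (hypothetical prefix/home values, not from the original file):
//
//   TranslatePath("district1", "reports", "")           // -> "/district1/reports"
//   TranslatePath("district1", "reports", "daily.csv")  // -> "district1/reports/daily.csv"
//   TranslatePath("district1", "reports", "/other/")    // -> "district1/other/"
//   TranslatePath("district1", "reports", "/../../etc") // -> "district1" (escape attempts clamp to the prefix)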
// NewS3Driver creates a new S3Driver with the AWS credentials and S3 parameters.
// bucket: name of S3 bucket
// prefix: key within the S3 bucket, if applicable
// homePath: default home directory for user (can be different from prefix)
func NewS3Driver(
bucket,
prefix,
homePath,
region,
awsAccessKeyID,
awsSecretKey,
awsToken,
remoteIPAddress string,
kmsKeyID *string,
lg Logger,
) *S3Driver {
config := aws.NewConfig().
WithRegion(region).
WithCredentials(credentials.NewStaticCredentials(awsAccessKeyID, awsSecretKey, awsToken))
s3 := s3.New(session.New(), config)
blockDownloadIPAddressesStr := os.Getenv("BLOCK_DOWNLOADS_IP_ADDRESSES")
BLOCK_DOWNLOADS_IP_ADDRESSES = []string{}
for _, addr := range strings.Split(blockDownloadIPAddressesStr, ",") {
// skip empty entries so an unset env var does not add "" to the block list
if trimmed := strings.TrimSpace(addr); trimmed != "" {
BLOCK_DOWNLOADS_IP_ADDRESSES = append(BLOCK_DOWNLOADS_IP_ADDRESSES, trimmed)
}
}
return &S3Driver{
s3: s3,
bucket: bucket,
prefix: prefix,
homePath: homePath,
remoteIPAddress: remoteIPAddress,
kmsKeyID: kmsKeyID,
lg: lg,
}
}
func getIPAndPort(combined string) (string, string) {
urlArray := strings.Split(combined, ":")
ip := ""
port := ""
if len(urlArray) > 0 {
ip = urlArray[0]
}
if len(urlArray) > 1 {
port = urlArray[1]
}
return ip, port
}
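// For example (illustrative inputs): getIPAndPort("203.0.113.10:52100") returns
// ("203.0.113.10", "52100"), and getIPAndPort("203.0.113.10") returns ("203.0.113.10", "").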
| DeleteDir | identifier_name |
s3driver.go | package sftp
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var BLOCK_DOWNLOADS_IP_ADDRESSES []string
type S3 interface {
ListObjectsV2(input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
CopyObject(input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
}
type S3Driver struct {
s3 S3
bucket string
prefix string
homePath string
remoteIPAddress string
kmsKeyID *string
lg Logger
}
func (d S3Driver) Stat(path string) (os.FileInfo, error) {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
resp, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(localPath),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
if resp.Contents == nil || *resp.KeyCount == 0 {
return nil, os.ErrNotExist
}
info := &fileInfo{
name: localPath,
mode: os.ModePerm,
size: *resp.Contents[0].Size,
mtime: *resp.Contents[0].LastModified,
}
if strings.HasSuffix(*resp.Contents[0].Key, "/") {
info.name = strings.TrimRight(info.name, "/")
info.mode = os.ModeDir
}
return info, nil
}
func (d S3Driver) ListDir(path string) ([]os.FileInfo, error) {
prefix, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
if !strings.HasSuffix(prefix, "/") {
prefix = prefix + "/"
}
var nextContinuationToken *string
files := []os.FileInfo{}
for {
objects, err := d.s3.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(d.bucket),
Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
ContinuationToken: nextContinuationToken,
})
if err != nil {
return nil, err
}
for _, o := range objects.Contents {
if *o.Key == prefix |
files = append(files, &fileInfo{
name: strings.TrimPrefix(*o.Key, prefix),
size: *o.Size,
mtime: *o.LastModified,
})
}
for _, o := range objects.CommonPrefixes {
files = append(files, &fileInfo{
name: strings.TrimSuffix(strings.TrimPrefix(*o.Prefix, prefix), "/"),
size: 4096,
mtime: time.Unix(1, 0),
mode: os.ModeDir,
})
}
if !*objects.IsTruncated {
return files, nil
}
nextContinuationToken = objects.NextContinuationToken
}
}
func (d S3Driver) DeleteDir(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
// s3 DeleteObject needs a trailing slash for directories
directoryPath := translatedPath
if !strings.HasSuffix(translatedPath, "/") {
directoryPath = translatedPath + "/"
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(directoryPath),
})
return err
}
func (d S3Driver) DeleteFile(path string) error {
translatedPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
_, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(translatedPath),
})
return err
}
func (d S3Driver) Rename(oldpath string, newpath string) error {
translatedOldpath, err := TranslatePath(d.prefix, d.homePath, oldpath)
if err != nil {
return err
}
translatedNewpath, err := TranslatePath(d.prefix, d.homePath, newpath)
if err != nil {
return err
}
input := &s3.CopyObjectInput{
Bucket: aws.String(d.bucket),
CopySource: aws.String(d.bucket + "/" + translatedOldpath),
Key: &translatedNewpath,
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
if _, err := d.s3.CopyObject(input); err != nil {
return err
}
if _, err = d.s3.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(d.bucket),
Key: &translatedOldpath,
}); err != nil {
return err
}
return nil
}
func (d S3Driver) MakeDir(path string) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
if !strings.HasSuffix(localPath, "/") {
localPath += "/"
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader([]byte{}),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
return err
}
func (d S3Driver) GetFile(path string) (io.ReadCloser, error) {
denyList := map[string]string{
"5baac2eca47b2e0001fba7bc": "",
"57559919ba5df50100000371": "",
"5948272c6737440001c6d97f": "",
"5dc35c1bc06f8d000102e30f": "",
"5b9c35c6a47b2e0001fba77c": "",
"56a91ecf599456010000089e": "",
"5859884232aee60001eb363c": "",
"610c95d3f8fe797dd7069926": "",
"572227ff2ccd540100000942": "",
"5d94ff814070e90001c74ae9": "",
"562a767542fcde0100000cd3": "",
"5d727c3d091c7a0001b6167b": "",
"577ec13a78ef4c010000010c": "",
"5a2eae046e18690001b2b671": "",
"596923cd7eb87f000134bd31": "",
"5d96661ed0c8470001afd962": "",
"5a7338b0e1d9a40001ec9f6b": "",
"544662a92b57f07b1d00003f": "",
"59b9f1145f63950001db2c2f": "",
"5efe7d59472dcc000193b4f1": "",
"5d65a3c947fec2000169542d": "",
"5d38d1d91269ea0001a7666f": "",
"5c0023e2d5e6320001392a1b": "",
"59a1e74bd0b14b0001af5bcc": "",
"57e153297406ba010000069c": "",
"57d9a194fc7c6301000003ec": "",
"55a7ffb439c12e0100000012": "",
"57222718dbfe7d01000009fd": "",
"5e46ef81836224000116c303": "",
"540dff9944ee2f1443004a7e": "",
"5f28cde4e3e8ee0001f65046": "",
"59cd5854f3a91c0001017a79": "",
"5ca25ee8f59f0b0001c3755a": "",
"6359869802626ad09401b198": "",
"635c47c9b773024858b7ce2e": "",
}
ip, port := getIPAndPort(d.remoteIPAddress)
if _, ok := denyList[d.prefix]; ok {
d.lg.ErrorD("s3-get-file-blocked-district", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
for _, blockedIP := range BLOCK_DOWNLOADS_IP_ADDRESSES {
if ip == blockedIP {
d.lg.ErrorD("s3-get-file-blocked-ip", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": path,
"client-ip": ip,
"client-port": port,
})
return nil, fmt.Errorf("not supported")
}
}
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return nil, err
}
obj, err := d.s3.GetObject(&s3.GetObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
})
if err != nil {
return nil, err
}
if d.lg != nil {
d.lg.InfoD("s3-get-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "GET",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": obj.ContentLength,
})
}
return obj.Body, nil
}
func (d S3Driver) PutFile(path string, r io.Reader) error {
localPath, err := TranslatePath(d.prefix, d.homePath, path)
if err != nil {
return err
}
rawData, err := ioutil.ReadAll(r)
if err != nil {
return err
}
input := &s3.PutObjectInput{
Bucket: aws.String(d.bucket),
Key: aws.String(localPath),
Body: bytes.NewReader(rawData),
}
if d.kmsKeyID == nil {
input.ServerSideEncryption = aws.String("AES256")
} else {
input.ServerSideEncryption = aws.String("aws:kms")
input.SSEKMSKeyId = aws.String(*d.kmsKeyID)
}
_, err = d.s3.PutObject(input)
if err != nil {
return err
}
ip, port := getIPAndPort(d.remoteIPAddress)
if d.lg != nil {
d.lg.InfoD("s3-put-file-success", meta{
"district_id": d.prefix,
"s3_bucket": d.bucket,
"method": "PUT",
"path": localPath,
"client-ip": ip,
"client-port": port,
"file_bytes_size": bytes.NewReader(rawData).Size(),
})
}
return nil
}
func (d S3Driver) RealPath(path string) string {
result, _ := TranslatePath("/", d.homePath, path)
return "/" + result
}
// TranslatePath takes an S3 root prefix, a home directory, and an absolute or relative path to append, and returns a cleaned and validated path.
// It will resolve things like '..' while disallowing the prefix to be escaped.
// It also preserves a single trailing slash if one is present, so it can be used on both directories and files.
func TranslatePath(prefix, home, path string) (string, error) {
if path == "" {
return filepath.Clean("/" + prefix + "/" + home), nil
}
var cleanPath string
if strings.HasPrefix(path, "/") {
cleanPath = filepath.Clean(prefix + path)
if !strings.HasPrefix(cleanPath, prefix) {
cleanPath = prefix
}
} else {
cleanPath = filepath.Clean("/" + prefix + "/" + home + filepath.Clean("/"+path))
}
// filepath.Clean drops trailing slashes, so put one back if the original path had it
if strings.HasSuffix(path, "/") {
cleanPath += "/"
}
return strings.TrimLeft(cleanPath, "/"), nil
}
// NewS3Driver creates a new S3Driver with the AWS credentials and S3 parameters.
// bucket: name of S3 bucket
// prefix: key within the S3 bucket, if applicable
// homePath: default home directory for user (can be different from prefix)
func NewS3Driver(
bucket,
prefix,
homePath,
region,
awsAccessKeyID,
awsSecretKey,
awsToken,
remoteIPAddress string,
kmsKeyID *string,
lg Logger,
) *S3Driver {
config := aws.NewConfig().
WithRegion(region).
WithCredentials(credentials.NewStaticCredentials(awsAccessKeyID, awsSecretKey, awsToken))
s3 := s3.New(session.New(), config)
blockDownloadIPAddressesStr := os.Getenv("BLOCK_DOWNLOADS_IP_ADDRESSES")
BLOCK_DOWNLOADS_IP_ADDRESSES = []string{}
for _, addr := range strings.Split(blockDownloadIPAddressesStr, ",") {
// skip empty entries so an unset env var does not add "" to the block list
if trimmed := strings.TrimSpace(addr); trimmed != "" {
BLOCK_DOWNLOADS_IP_ADDRESSES = append(BLOCK_DOWNLOADS_IP_ADDRESSES, trimmed)
}
}
return &S3Driver{
s3: s3,
bucket: bucket,
prefix: prefix,
homePath: homePath,
remoteIPAddress: remoteIPAddress,
kmsKeyID: kmsKeyID,
lg: lg,
}
}
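// Illustrative usage sketch (all values below are assumptions, not from the original file):
//
//   driver := NewS3Driver(
//       "example-bucket",           // bucket
//       "district1",                // prefix inside the bucket
//       "reports",                  // homePath
//       "us-east-1",                // region
//       accessKeyID, secretKey, "", // static credentials; empty session token
//       "203.0.113.10:52100",       // remoteIPAddress as "ip:port"
//       nil,                        // kmsKeyID: nil selects SSE-S3 (AES256) instead of SSE-KMS
//       logger,                     // any Logger implementation
//   )
//   files, err := driver.ListDir("/")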
func getIPAndPort(combined string) (string, string) {
urlArray := strings.Split(combined, ":")
ip := ""
port := ""
if len(urlArray) > 0 {
ip = urlArray[0]
}
if len(urlArray) > 1 {
port = urlArray[1]
}
return ip, port
}
| {
continue
} | conditional_block |
app.js | // scroll header
function eventScroll() {
window.addEventListener("scroll", function() {
let header = document.querySelector("header");
if (window.scrollY >= 20)
header.classList.add('sticky');
else if (window.scrollY < 19) {
header.classList.remove('sticky');
// header.classList.add('fadeBlock');
}
});
}
// btn nav
var colapse = document.querySelector('.header__btn-colapse');
var btnColapse = document.getElementById("nav-btn-colapse");
var header = document.querySelector('.header__colapse');
var menuOpen = false;
header.addEventListener('click', function(e) {
if (document.querySelector('.header__colapse').contains(e.target)) {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
});
colapse.onclick = function() {
if (!menuOpen) {
btnColapse.classList.add('open');
menuOpen = true;
header.classList.add('hide');
} else {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
}
$('.owl-carousel').owlCarousel({
loop: true,
margin: 20,
nav: true,
responsiveClass: true,
responsive: {
0: {
items: 1
},
576: {
items: 2,
nav: false
},
768: {
items: 3,
nav: true
},
992: {
items: 4,
nav: true
},
1200: {
items: 6,
nav: true
}
}
})
// navigation
// product data (only 10 items)
var dataProducts = [{
id: "item-1",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-1.jpg",
price: 149000,
describe1: "Túi Xách Nữ Thời Trang 💖FREESHIP 50k💖 Túi Xách Nữ Đeo Chéo Dây Da Baniclassic Trẻ trung Chất Cực Đẹp TX04",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-2",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-2.jpg",
price: 249000,
describe1: "TÚI ĐEO CHÉO QUẢNG CHÂU⚜️",
describe2: "Từ những cô nàng bình thường nhất cho tới những ngôi sao hàng đầu, tất cả đều chia sẻ một tình yêu vĩ đại với TÚI XÁCH NỮ ĐẸP của mình TÚI XÁCH NỮ hợp dáng người, hợp màu sắc làm tăng vẻ đẹp của trang phục bạn mặc và khẳng định ấn tượng của bạn trong mắt người đối diện. Tuy nhiên, không phải ai cũng biết chọn một chiếc TÚI XÁCH NỮ DA PU thực sự phù hợp với phom cơ thể của mình. Mang tới cho các cô nàng sự thoải mái khi đi dạo phố hoặc hẹn hò bè bạn vì không phải cầm mang những vật dụng linh tinh,",
describe3: "chiếc TÚI XÁCH NỮ DA ĐẸP đã trở thành người bạn không thể thiếu các nàng. Chúng có sự đa dạng từ kiểu cách tới màu sắc, size…tùy theo nhu cầu của mình mà các nàng lựa chọn một sản phẩm thích hợp. Và nếu bạn cũng đang đi tìm một chiếc ví thể thể hiện được cá tính của bản thân một cách rõ nét nhất và đang... lạc lối, thì hãy cùng khám phá và cảm nhận những nét đẹp và quyến rũ của Túi Xách nữ da lộn mà Túi Xách Nữ ZABUZA cung cấp nhé.",
orderQty: 2
},
{
id: "item-3",
name: "Túi Kẹp Nách Nữ Túi Đeo Chéo Nữ Vintage Hottrend KR 180- 2 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-3.jpg",
price: 349000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-4",
name: "(Hottrend) Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
url: "../assets/img/items/item-4.jpg",
price: 139000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 4 màu Hot (Trắng , Đen, Xanh Dương, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 3
},
{
id: "item-5",
name: "(HOT) Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-5.jpg",
price: 259000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cmTúi Kẹp nách nữ có kích thước: Dài 26 cm, Rộng 6 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 7 màu Hot (Trắng Caro, Xanh Caro, Đỏ Caro, Xám Caro, Tím Hồng, Vàng Xanh, Đen Xanh) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-6",
name: "[Mã FAMAYWA giảm 10k đơn từ 50k] Túi xách nữ, túi kẹp nách da mềm trơn BH 433",
url: "../assets/img/items/item-6.jpg",
price: 299000,
describe1: "Túi xách là một phụ kiện thời trang không thể thiếu của các bạn trẻ dù là nam hay nữ, nó thể hiện phong cách cũng như cá tính của chủ nhân.",
describe2: "Nếu bạn yêu thích phong cách đơn giản nhưng tinh tế thì chiếc túi xách là một lựa chọn hoàn hảo cho bạn.Chiếc túi xách Tote 6 sở hữu thiết kế thời trang với phần nắp túi cách điệu kết hợp tông màu nổi bật, những đường may tinh tế, cùng gam màu trung tính trẻ trung năng động sẽ vô cùng hữu dụng cho bạn trong việc chứa các vật dụng cá nhân.",
describe3: " Bên cạnh đó, túi xách còn thể hiện gu thời trang và cá tính của bạn.",
orderQty: 2
},
{
id: "item-7",
name: "Túi Cói Merci",
url: "../assets/img/items/item-7.jpg",
price: 599000,
describe1: "Túi Cói Merci - nhỏ nhỏ xinh xinh nhưng tiện vô cùng . Sống ảo cũng xinh lung linhh ✨✨🔥🔥 ",
describe2: "Để mà đi du lịch sống ảo k thể thiếu em túi này lun ý ce ạ 🤩" +
"TÚI CÓI MERCI hot hit 🌴🌴🌴" +
"Túi rộng tha hồ đựng đồ nha ce",
describe3: "Size loại 1: 35x36cm" +
"size loại 2: 32x35cm,đựng vừa A4, vừa laptop, đi học đi làm , du lịch , còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// default cart contents
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
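// Sketch of the persisted shapes this script relies on (inferred from the code below;
// key names are real, example values are made up):
// localStorage "listUser"  -> [{ username, password, productID: [{ id: "item-1", n: 3 }, ...] }, ...]
// localStorage "userLogin" -> the currently signed-in user object from that list
// localStorage "filterActive" / "searchProducts" -> last chosen product filter and cached search results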
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// check the current login state and update the header
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var userNow = checkLogin();
var bool = Boolean(userNow);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// add a product row to the cart table
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img src="${product.url}" class="img-product">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// remove one item from the cart
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
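// Illustrative usage (hypothetical data): drop the entry whose id is "item-2"; note this
// also refreshes the cart badge via totalProduct():
// removeByAttr([{ id: "item-1" }, { id: "item-2" }], "id", "item-2") // -> [{ id: "item-1" }]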
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// when a product quantity changes, persist the new quantities
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// total price
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
// use let so each row's delete handler captures its own index
for (let i = 0; i < table.rows.length; ++i) {
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// build the product list from a user's cart
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang) {
var product = dataProducts.find(function(value) {
return value.id == item.id;
});
product.orderQty = item.n;
products.push(product)
}
}
return products;
}
// add products to the cart view
// userCartList(users[0])
var addProduct = function(products) {
var prd = products(checkLogin());
if (prd) {
for (var product of prd) {
addRow(product);
}
totalPrice();
return true;
}
return false;
}
// end: add products
// all products
var pushProduct = function(dataProducts, n) {
var productList = document.getElementById("listProducts");
var products = [];
// render n randomly picked products
if (n) {
setTimeout(function() {
for (let i = 0; i < n; ++i) {
let k = Math.floor(Math.random() * 10);
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
// render every product in the array
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
// filter event
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
// search
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
if (n == 3) {
dataProducts.sort(function(data1, data2) {
return data1.price - data2.price;
});
pushProduct(dataProducts);
} else if (n == 4) { // else-if: keep the n == 3 branch from also falling into the random default below
var products = JSON.parse(localStorage.getItem("searchProducts"));
pushProduct(products);
} else {
pushProduct(dataProducts, 30);
}
}
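// Inferred behavior of sortFilter (not documented in the original): 3 -> all products sorted
// by ascending price, 4 -> cached search results, any other value of the "filterActive" flag
// (including the default "0") -> 30 randomly picked products.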
// cart icon click event
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// registration form validation
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // at least 6 characters, no special characters
var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; // at least 8 characters and at least one digit
var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // each word must start with an uppercase letter; both family and given name required
var regPhone = /[0-9]{10}/; // phone number must be numeric with 10 digits
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau");
var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt");
var lbEmail = document.querySelector("#lbEmail");
var lbNgaySinh = document.querySelector("#lbNgaySinh");
if (!regUserName.test(data.username)) {
lbUserName.innerText = "Tên đăng nhập ít nhất phải có 6 ký tự không chứa ký tự đặc biệt";
return false;
}
lbUserName.innerText = "";
if (!regPassword.test(data.password)) {
lbMatKhau.innerText = "Mật khẩu phải có 8 ký tự trở lên và có ít nhất 1 số";
return false;
}
lbMatKhau.innerText = "";
if (data.password !== data.kh_nhaplaimatkhau) {
lbNhapLaiMatKhau.innerText = "Mật khẩu chưa khớp";
return false;
}
lbNhapLaiMatKhau.innerText = "";
if (!regName.test(data.kh_ten)) {
lbTen.innerText = "Chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên";
return false;
}
lbTen.innerText = "";
if (data.kh_diachi.trim().length == 0) {
lbDiaChi.innerText = "Địa chỉ không được bỏ trống";
return false;
}
lbDiaChi.innerText = "";
if (!regPhone.test(data.kh_dienthoai)) {
lbDt.innerText = "số điện thoại phải là số và có 10 chữ số ";
return false;
}
lbDt.innerText = "";
if (!regEmail.test(data.kh_email)) {
lbEmail.innerText = "vui lòng điền đúng định dạng email";
return false;
}
lbEmail.innerText = "";
if (data.kh_namsinh > 2002) {
lbNgaySinh.innerText = "bạn phải đủ 18 tuổi";
return false;
}
lbNgaySinh.innerText = "";
return true;
}
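// Illustrative values (made up) that the validators above are expected to accept:
// { username: "nguyenvana", password: "matkhau123", kh_nhaplaimatkhau: "matkhau123",
//   kh_ten: "Nguyen Van An", kh_diachi: "123 Le Loi", kh_dienthoai: "0912345678",
//   kh_email: "an@example.com", kh_namsinh: 2000 }
// and to reject e.g. username "abc" (fewer than 6 word characters) or kh_dienthoai "12345".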
// populate profile information
var getThongTin = function(user) {
document.getElementById("kh_ten").value = user.kh_ten;
document.getElementById("kh_gioitinh").value = user.kh_gioitinh == 0 ? "Nam" : "Nữ";
document.getElementById("kh_diachi").value = user.kh_diachi;
document.getElementById("kh_dienthoai").value = user.kh_dienthoai;
document.getElementById("kh_email").value = user.kh_email;
document.getElementById("kh_ngaysinh").value = user.kh_ngaysinh + "/" + user.kh_thangsinh + "/" + user.kh_namsinh;
}
// checkout section (paying.html)
var pay = function() {
// pull the products from the user's cart
var list = document.getElementById("listProductPay");
var product = userCartList(userNow);
var total = 0;
for (var p of product) {
var item = `
<li class="list-group-item d-flex justify-content-between ">
<div>
<h4 class="my-0">${p.name}</h4>
<small class="text-muted">${p.price} x ${p.orderQty} </small>
</div>
<span class="text-muted">${p.orderQty}</span>
</li>
`;
list.innerHTML += item;
total += p.price * p.orderQty;
}
var totalPrice = `
<li class="list-group-item d-flex justify-content-between">
<span>Tổng thành tiền</span>
<strong id="thanhTien">${total}</strong>
</li>
`;
list.innerHTML += totalPrice;
}
// product click event
var getProductId = function() {
var a = document.getElementsByClassName("sale__item-link");
for (var i = 0; i < a.length; i++) {
a[i].addEventListener("click", func | indow.location = "./pages/products-detail.html?" + productID;
})
}
}
var showDetailProduct = function() {
var linkItem = window.location.href;
var id = linkItem.split("?")[1];
var data = dataProducts.find(function(value, index) {
return value.id == id;
})
var imgProduct = document.querySelector(".product__detail-left");
var linkImg = data.url.split(".jpg")[0];
var imgLink2 = linkImg + ".1.jpg";
var imgLink3 = linkImg + ".2.jpg";
var dataImg = `
<img src="${data.url}" class="product__detail-main w-50" data-bs-toggle="modal" data-bs-target="#imageView"></img>
<div class="product__details row">
<div class="d-flex justify-content-center"g-1 mt-2 row justify-content-center">
<img src="${data.url}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink2}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink3}" style="width:100px" class=" mt-4 product__detail-img"></img>
</div>
</div>
`;
var modalViewImg = document.getElementById("modalViewImg");
modalViewImg.innerHTML = `<img src="${data.url}" class="w-100"></img>`;
imgProduct.id = id;
imgProduct.innerHTML = dataImg;
var name = document.getElementById("name");
var price = document.getElementById("price");
var describe1 = document.getElementById("describe1");
var describe2 = document.getElementById("describe2");
var describe3 = document.getElementById("describe3");
name.innerText = data.name;
price.innerHTML = `Giá: <span class="text-danger">${data.price}<sup>đ<sup></span>`;
describe1.innerText = data.describe1;
describe2.innerText = data.describe2;
describe3.innerText = data.describe3;
}
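// Descriptive note (inferred): the product id travels in the query string, e.g.
// "./pages/products-detail.html?item-3", and showDetailProduct recovers it with
// linkItem.split("?")[1] before looking the product up in dataProducts.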
$(document).ready(function() {
$("#btnAddToCard").click(function() {
$('.toast').toast('show');
})
});
// count products shown on the cart badge
var totalProduct = function() {
var totalProduct = document.querySelector(".totalProduct");
var total = userNow.productID.length;
totalProduct.innerText = total;
}
if (userNow)
totalProduct(); | tion(e) {
e.preventDefault();
var productID = this.id;
w | conditional_block |
app.js | // scroll header
function eventScroll() {
window.addEventListener("scroll", function() {
let header = document.querySelector("header");
if (window.scrollY >= 20)
header.classList.add('sticky');
else if (window.scrollY < 19) {
header.classList.remove('sticky');
// header.classList.add('fadeBlock');
}
});
}
// btn nav
var colapse = document.querySelector('.header__btn-colapse');
var btnColapse = document.getElementById("nav-btn-colapse");
var header = document.querySelector('.header__colapse');
var menuOpen = false;
header.addEventListener('click', function(e) {
if (document.querySelector('.header__colapse').contains(e.target)) {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
});
colapse.onclick = function() {
if (!menuOpen) {
btnColapse.classList.add('open');
menuOpen = true;
header.classList.add('hide');
} else {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
}
$('.owl-carousel').owlCarousel({
loop: true,
margin: 20,
nav: true,
responsiveClass: true,
responsive: {
0: {
items: 1
},
576: {
items: 2,
nav: false
},
768: {
items: 3,
nav: true
},
992: {
items: 4,
nav: true
},
1200: {
items: 6,
nav: true
}
}
})
// navigation
// product data (only 10 items)
var dataProducts = [{
id: "item-1",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-1.jpg",
price: 149000,
describe1: "Túi Xách Nữ Thời Trang 💖FREESHIP 50k💖 Túi Xách Nữ Đeo Chéo Dây Da Baniclassic Trẻ trung Chất Cực Đẹp TX04",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-2",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-2.jpg",
price: 249000,
describe1: "TÚI ĐEO CHÉO QUẢNG CHÂU⚜️",
describe2: "Từ những cô nàng bình thường nhất cho tới những ngôi sao hàng đầu, tất cả đều chia sẻ một tình yêu vĩ đại với TÚI XÁCH NỮ ĐẸP của mình TÚI XÁCH NỮ hợp dáng người, hợp màu sắc làm tăng vẻ đẹp của trang phục bạn mặc và khẳng định ấn tượng của bạn trong mắt người đối diện. Tuy nhiên, không phải ai cũng biết chọn một chiếc TÚI XÁCH NỮ DA PU thực sự phù hợp với phom cơ thể của mình. Mang tới cho các cô nàng sự thoải mái khi đi dạo phố hoặc hẹn hò bè bạn vì không phải cầm mang những vật dụng linh tinh,",
describe3: "chiếc TÚI XÁCH NỮ DA ĐẸP đã trở thành người bạn không thể thiếu các nàng. Chúng có sự đa dạng từ kiểu cách tới màu sắc, size…tùy theo nhu cầu của mình mà các nàng lựa chọn một sản phẩm thích hợp. Và nếu bạn cũng đang đi tìm một chiếc ví thể thể hiện được cá tính của bản thân một cách rõ nét nhất và đang... lạc lối, thì hãy cùng khám phá và cảm nhận những nét đẹp và quyến rũ của Túi Xách nữ da lộn mà Túi Xách Nữ ZABUZA cung cấp nhé.",
orderQty: 2
},
{
id: "item-3",
name: "Túi Kẹp Nách Nữ Túi Đeo Chéo Nữ Vintage Hottrend KR 180- 2 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-3.jpg",
price: 349000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-4",
name: "(Hottrend) Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
url: "../assets/img/items/item-4.jpg",
price: 139000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 4 màu Hot (Trắng , Đen, Xanh Dương, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 3
},
{
id: "item-5",
name: "(HOT) Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-5.jpg",
price: 259000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cmTúi Kẹp nách nữ có kích thước: Dài 26 cm, Rộng 6 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 7 màu Hot (Trắng Caro, Xanh Caro, Đỏ Caro, Xám Caro, Tím Hồng, Vàng Xanh, Đen Xanh) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-6",
name: "[Mã FAMAYWA giảm 10k đơn từ 50k] Túi xách nữ, túi kẹp nách da mềm trơn BH 433",
url: "../assets/img/items/item-6.jpg",
price: 299000,
describe1: "Túi xách là một phụ kiện thời trang không thể thiếu của các bạn trẻ dù là nam hay nữ, nó thể hiện phong cách cũng như cá tính của chủ nhân.",
describe2: "Nếu bạn yêu thích phong cách đơn giản nhưng tinh tế thì chiếc túi xách là một lựa chọn hoàn hảo cho bạn.Chiếc túi xách Tote 6 sở hữu thiết kế thời trang với phần nắp túi cách điệu kết hợp tông màu nổi bật, những đường may tinh tế, cùng gam màu trung tính trẻ trung năng động sẽ vô cùng hữu dụng cho bạn trong việc chứa các vật dụng cá nhân.",
describe3: " Bên cạnh đó, túi xách còn thể hiện gu thời trang và cá tính của bạn.",
orderQty: 2
},
{
id: "item-7",
name: "Túi Cói Merci",
url: "../assets/img/items/item-7.jpg",
price: 599000,
describe1: "Túi Cói Merci - nhỏ nhỏ xinh xinh nhưng tiện vô cùng . Sống ảo cũng xinh lung linhh ✨✨🔥🔥 ",
describe2: "Để mà đi du lịch sống ảo k thể thiếu em túi này lun ý ce ạ 🤩" +
"TÚI CÓI MERCI hot hit 🌴🌴🌴" +
"Túi rộng tha hồ đựng đồ nha ce",
describe3: "Size loại 1: 35x36cm" +
"size loại 2: 32x35cm,đựng vừa A4, vừa laptop, đi học đi làm , du lịch , còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// default cart contents
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// check the current login state and update the header
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var userNow = checkLogin();
var bool = Boolean(userNow);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// add a product row to the cart table
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img src="${product.url}" class="img-product">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// remove one item from the cart
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// when a product quantity changes, persist the new quantities
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// total price
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
// use let so each row's delete handler captures its own index
for (let i = 0; i < table.rows.length; ++i) {
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// hàm lấy ra sản phẩm từ user
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang) {
    var product = dataProducts.find(function(value) {
        return value.id == item.id;
    });
    product.orderQty = item.n;
products.push(product)
}
}
return products;
}
// add product vào cart
// userCartList(users[0])
var addProduct = function(products) {
var prd = products(checkLogin());
if (prd) {
for (var product of prd) {
addRow(product);
}
totalPrice();
return true;
}
return false;
}
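// Assumed wiring for cart.html (the call site is not shown in this file):
// addProduct() takes the cart-list builder as a callback and feeds it the
// logged-in user, filling #datarow and #totalPrice on page load.
// addProduct(userCartList);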
// end them sản phẩm
// tat ca san pham
var pushProduct = function(dataProducts, n) {
var productList = document.getElementById("listProducts");
var products = [];
// in ra ngẫu nhiên số sản phẩm theo n
if (n) {
setTimeout(function() {
for (let i = 0; i < n; ++i) {
let k = Math.floor(Math.random() * 10);
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
// in ra tat cả sản phẩm có trong mảng
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
// sự kiện filter
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
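// Hypothetical call site for the handler above (the button markup is an
// assumption, not part of this file): a filter button clears the previous
// active state, redirects to products.html and stores filterActive for that
// page, which is presumably read back through sortFilter() further down.
// <a class="btn-filter" onclick="filter(this, 3)">Sort by price</a>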
// tìm kiếm
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
if (n == 3) {
dataProducts.sort(function(data1, data2) {
return data1.price - data2.price;
});
pushProduct(dataProducts);
}
if (n == 4) {
var products = JSON.parse(localStorage.getItem("searchProducts"));
pushProduct(products);
} else {
pushProduct(dataProducts, 30);
}
}
// sự kiện khi ấn vào giỏ hàng
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// đăng ký
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // ít nhất phải có 6 ký tự không chứa ký tự đặc biệt
var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; //phải có 8 ký tự trở lên và có ít nhất 1 số
var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên
var regPhone = /[0-9]{10}/; // số điện thoại phải là số và có 10 chữ số
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau");
var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt");
var lbEmail = document.querySelector("#lbEmail");
var lbNgaySinh = document.querySelector("#lbNgaySinh");
if (!regUserName.test(data.username)) {
lbUserName.innerText = "Tên đăng nhập ít nhất phải có 6 ký tự không chứa ký tự đặc biệt";
return false;
}
lbUserName.innerText = "";
if (!regPassword.test(data.password)) {
lbMatKhau.innerText = "Mật khẩu phải có 8 ký tự trở lên và có ít nhất 1 số";
return false;
}
lbMatKhau.innerText = "";
if (data.password !== data.kh_nhaplaimatkhau) {
lbNhapLaiMatKhau.innerText = "Mật khẩu chưa khớp";
return false;
}
lbNhapLaiMatKhau.innerText = "";
if (!regName.test(data.kh_ten)) {
lbTen.innerText = "Chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên";
return false;
}
lbTen.innerText = "";
if (data.kh_diachi.trim().length == 0) {
lbDiaChi.innerText = "Địa chỉ không được bỏ trống";
return false;
}
lbDiaChi.innerText = "";
if (!regPhone.test(data.kh_dienthoai)) {
lbDt.innerText = "số điện thoại phải là số và có 10 chữ số ";
return false;
}
lbDt.innerText = "";
if (!regEmail.test(data.kh_email)) {
lbEmail.innerText = "vui lòng điền đúng định dạng email";
return false;
}
lbEmail.innerText = "";
if (data.kh_namsinh > 2002) {
lbNgaySinh.innerText = "bạn phải đủ 18 tuổi";
return false;
}
lbNgaySinh.innerText = "";
return true;
}
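// Illustrative values for the patterns above (sample inputs are hypothetical):
// regPassword.test("abc12345") === true   // 8+ alphanumerics with a digit and a lowercase letter
// regPassword.test("abcdefgh") === false  // rejected: no digit
// regPhone.test("0912345678") === true    // contains a run of 10 digits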
// get thông tin
var getThongTin = function(user) {
document.getElementById("kh_ten").value = user.kh_ten;
document.getElementById("kh_gioitinh").value = user.kh_gioitinh == 0 ? "Nam" : "Nữ";
document.getElementById("kh_diachi").value = user.kh_diachi;
document.getElementById("kh_dienthoai").value = user.kh_dienthoai;
document.getElementById("kh_email").value = user.kh_email;
document.getElementById("kh_ngaysinh").value = user.kh_ngaysinh + "/" + user.kh_thangsinh + "/" + user.kh_namsinh;
}
// phần thanh toán paying.html
var pay = function() {
// lấy sản phẩm từ user ra
var list = document.getElementById("listProductPay");
var product = userCartList(userNow);
var total = 0;
for (var p of product) {
var item = `
<li class="list-group-item d-flex justify-content-between ">
<div>
<h4 class="my-0">${p.name}</h4>
<small class="text-muted">${p.price} x ${p.orderQty} </small>
</div>
<span class="text-muted">${p.orderQty}</span>
</li>
`;
list.innerHTML += item;
total += p.price * p.orderQty;
}
var totalPrice = `
<li class="list-group-item d-flex justify-content-between">
<span>Tổng thành tiền</span>
<strong id="thanhTien">${total}</strong>
</li>
`;
list.innerHTML += totalPrice;
}
// sự kiện ấn vào sản phẩm
var getProductId = function() {
var a = document.getElementsByClassName("sale__item-link");
for (var i = 0; i < a.length; i++) {
a[i].addEventListener("click", function(e) {
e.preventDefault();
var productID = this.id;
window.location = "./pages/products-detail.html?" + productID;
})
}
}
var showDetailProduct = function() {
var linkItem = window.location.href;
var id = linkItem.split("?")[1];
var data = dataProducts.find(function(value, index) {
return value.id == id;
})
var imgProduct = document.querySelector(".product__detail-left");
var linkImg = data.url.split(".jpg")[0];
var imgLink2 = linkImg + ".1.jpg";
var imgLink3 = linkImg + ".2.jpg";
var dataImg = `
<img src="${data.url}" class="product__detail-main w-50" data-bs-toggle="modal" data-bs-target="#imageView"></img>
<div class="product__details row">
<div class="d-flex justify-content-center"g-1 mt-2 row justify-content-center">
<img src="${data.url}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink2}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink3}" style="width:100px" class=" mt-4 product__detail-img"></img>
</div>
</div>
`;
var modalViewImg = document.getElementById("modalViewImg");
modalViewImg.innerHTML = `<img src="${data.url}" class="w-100"></img>`;
imgProduct.id = id;
imgProduct.innerHTML = dataImg;
var name = document.getElementById("name");
var price = document.getElementById("price");
var describe1 = document.getElementById("describe1");
var describe2 = document.getElementById("describe2");
var describe3 = document.getElementById("describe3");
name.innerText = data.name;
price.innerHTML = `Giá: <span class="text-danger">${data.price}<sup>đ</sup></span>`;
describe1.innerText = data.describe1;
describe2.innerText = data.describe2;
describe3.innerText = data.describe3;
}
$(document).ready(function() {
$("#btnAddToCard").click(function() {
$('.toast').toast('show');
})
});
// đếm sản phẩm trên giỏ hàng
var totalProduct = function() {
var totalProduct = document.querySelector(".totalProduct");
var total = userNow.productID.length;
totalProduct.innerText = total;
}
if (userNow)
    totalProduct();
// app.js
// scroll header
function eventScroll() {
window.addEventListener("scroll", function() {
let header = document.querySelector("header");
if (window.scrollY >= 20)
header.classList.add('sticky');
else if (window.scrollY < 19) {
header.classList.remove('sticky');
// header.classList.add('fadeBlock');
}
});
}
// btn nav
var colapse = document.querySelector('.header__btn-colapse');
var btnColapse = document.getElementById("nav-btn-colapse");
var header = document.querySelector('.header__colapse');
var menuOpen = false;
header.addEventListener('click', function(e) {
if (document.querySelector('.header__colapse').contains(e.target)) {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
});
colapse.onclick = function() {
if (!menuOpen) {
btnColapse.classList.add('open');
menuOpen = true;
header.classList.add('hide');
} else {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
}
$('.owl-carousel').owlCarousel({
loop: true,
margin: 20,
nav: true,
responsiveClass: true,
responsive: {
0: {
items: 1
},
576: {
items: 2,
nav: false
},
768: {
items: 3,
nav: true
},
992: {
items: 4,
nav: true
},
1200: {
items: 6,
nav: true
}
}
})
// điều hướng
// data products 10 sản phẩm thôi
var dataProducts = [{
id: "item-1",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-1.jpg",
price: 149000,
describe1: "Túi Xách Nữ Thời Trang 💖FREESHIP 50k💖 Túi Xách Nữ Đeo Chéo Dây Da Baniclassic Trẻ trung Chất Cực Đẹp TX04",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-2",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-2.jpg",
price: 249000,
describe1: "TÚI ĐEO CHÉO QUẢNG CHÂU⚜️",
describe2: "Từ những cô nàng bình thường nhất cho tới những ngôi sao hàng đầu, tất cả đều chia sẻ một tình yêu vĩ đại với TÚI XÁCH NỮ ĐẸP của mình TÚI XÁCH NỮ hợp dáng người, hợp màu sắc làm tăng vẻ đẹp của trang phục bạn mặc và khẳng định ấn tượng của bạn trong mắt người đối diện. Tuy nhiên, không phải ai cũng biết chọn một chiếc TÚI XÁCH NỮ DA PU thực sự phù hợp với phom cơ thể của mình. Mang tới cho các cô nàng sự thoải mái khi đi dạo phố hoặc hẹn hò bè bạn vì không phải cầm mang những vật dụng linh tinh,",
describe3: "chiếc TÚI XÁCH NỮ DA ĐẸP đã trở thành người bạn không thể thiếu các nàng. Chúng có sự đa dạng từ kiểu cách tới màu sắc, size…tùy theo nhu cầu của mình mà các nàng lựa chọn một sản phẩm thích hợp. Và nếu bạn cũng đang đi tìm một chiếc ví thể thể hiện được cá tính của bản thân một cách rõ nét nhất và đang... lạc lối, thì hãy cùng khám phá và cảm nhận những nét đẹp và quyến rũ của Túi Xách nữ da lộn mà Túi Xách Nữ ZABUZA cung cấp nhé.",
orderQty: 2
},
{
id: "item-3",
name: "Túi Kẹp Nách Nữ Túi Đeo Chéo Nữ Vintage Hottrend KR 180- 2 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-3.jpg",
price: 349000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-4",
name: "(Hottrend) Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
url: "../assets/img/items/item-4.jpg",
price: 139000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 4 màu Hot (Trắng , Đen, Xanh Dương, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 3
},
{
id: "item-5",
name: "(HOT) Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-5.jpg",
price: 259000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cmTúi Kẹp nách nữ có kích thước: Dài 26 cm, Rộng 6 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 7 màu Hot (Trắng Caro, Xanh Caro, Đỏ Caro, Xám Caro, Tím Hồng, Vàng Xanh, Đen Xanh) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-6",
name: "[Mã FAMAYWA giảm 10k đơn từ 50k] Túi xách nữ, túi kẹp nách da mềm trơn BH 433",
url: "../assets/img/items/item-6.jpg",
price: 299000,
describe1: "Túi xách là một phụ kiện thời trang không thể thiếu của các bạn trẻ dù là nam hay nữ, nó thể hiện phong cách cũng như cá tính của chủ nhân.",
describe2: "Nếu bạn yêu thích phong cách đơn giản nhưng tinh tế thì chiếc túi xách là một lựa chọn hoàn hảo cho bạn.Chiếc túi xách Tote 6 sở hữu thiết kế thời trang với phần nắp túi cách điệu kết hợp tông màu nổi bật, những đường may tinh tế, cùng gam màu trung tính trẻ trung năng động sẽ vô cùng hữu dụng cho bạn trong việc chứa các vật dụng cá nhân.",
describe3: " Bên cạnh đó, túi xách còn thể hiện gu thời trang và cá tính của bạn.",
orderQty: 2
},
{
id: "item-7",
name: "Túi Cói Merci",
url: "../assets/img/items/item-7.jpg",
price: 599000,
describe1: "Túi Cói Merci - nhỏ nhỏ xinh xinh nhưng tiện vô cùng . Sống ảo cũng xinh lung linhh ✨✨🔥🔥 ",
describe2: "Để mà đi du lịch sống ảo k thể thiếu em túi này lun ý ce ạ 🤩" +
"TÚI CÓI MERCI hot hit 🌴🌴🌴" +
"Túi rộng tha hồ đựng đồ nha ce",
describe3: "Size loại 1: 35x36cm" +
"size loại 2: 32x35cm,đựng vừa A4, vừa laptop, đi học đi làm , du lịch , còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// danh sách giỏ hàng mặc định
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// tạo hàm đăng ký
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var bool = Boolean(checkLogin());
var userNow = checkLogin();
console.log(bool);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// thêm sản phẩm
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img src="${product.url}" class="img-product">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// xoa 1 item carrt
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// khi thay đổi số lượng sản phẩm
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// tổng giá
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
for (var i = 0; i < table.rows.length; ++i) {
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(--i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// hàm lấy ra sản phẩm từ user
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang) {
var product = dataProducts.find(function(value) {
return value.id == item.id;
});
product.orderQty = item.n;
products.push(product)
}
}
return products;
}
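// Quick check with the default demo account defined above: the ids in
// danhsachGioHang (item-1, item-2, item-6) are looked up in dataProducts and
// each product's orderQty is overwritten with the stored n (3, 1 and 2).
// var demoCart = userCartList(users[0]); // -> three products with orderQty 3, 1, 2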
// add product vào cart
// userCartList(users[0])
var addProduct = function(products) {
var prd = products(checkLogin());
if (prd) {
for (var product of prd) {
addRow(product);
}
totalPrice();
return true;
}
return false;
}
// end them sản phẩm
// tat ca san pham
var pushProduct = function(dataProducts, n) {
var productList = document.getElementById("listProducts");
var products = [];
// in ra ngẫu nhiên số sản phẩm theo n
if (n) {
setTimeout(function() {
for (let i = 0; i < n; ++i) {
let k = Math.floor(Math.random() * 10);
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
// in ra tat cả sản phẩm có trong mảng
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
// sự kiện filter
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
// tìm kiếm
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
if (n == 3) {
dataProducts.sort(function(data1, data2) {
return data1.price - data2.price;
});
pushProduct(dataProducts);
}
if (n == 4) {
var products = JSON.parse(localStorage.getItem("searchProducts"));
pushProduct(products);
} else {
pushProduct(dataProducts, 30);
}
}
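// Behaviour sketch: sortFilter(3) sorts by ascending price, but because the
// second branch is `if (n == 4) ... else ...` rather than `else if`, the else
// arm also runs for n == 3 and appends 30 random cards afterwards; only
// sortFilter(4) is limited to the stored search results.
// sortFilter(3);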
// sự kiện khi ấn vào giỏ hàng
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// đăng ký
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // ít nhất phải có 6 ký tự không chứa ký tự đặc biệt
var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; //phải có 8 ký tự trở lên và có ít nhất 1 số
var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên
var regPhone = /[0-9]{10}/; // số điện thoại phải là số và có 10 chữ số
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau");
var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt");
var lbEmail = document.querySelector("#lbEmail");
var lbNgaySinh = document.querySelector("#lbNgaySinh");
if (!regUserName.test(data.username)) {
lbUserName.innerText = "Tên đăng nhập ít nhất phải có 6 ký tự không chứa ký tự đặc biệt";
return false;
}
lbUserName.innerText = "";
if (!regPassword.test(data.password)) {
lbMatKhau.innerText = "Mật khẩu phải có 8 ký tự trở lên và có ít nhất 1 số";
return false;
}
lbMatKhau.innerText = "";
if (data.password !== data.kh_nhaplaimatkhau) {
lbNhapLaiMatKhau.innerText = "Mật khẩu chưa khớp";
return false;
}
lbNhapLaiMatKhau.innerText = "";
if (!regName.test(data.kh_ten)) {
lbTen.innerText = "Chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên";
return false;
}
lbTen.innerText = "";
if (data.kh_diachi.trim().length == 0) {
lbDiaChi.innerText = "Địa chỉ không được bỏ trống";
return false;
}
lbDiaChi.innerText = "";
if (!regPhone.test(data.kh_dienthoai)) {
lbDt.innerText = "số điện thoại phải là số và có 10 chữ số ";
return false;
}
lbDt.innerText = "";
if (!regEmail.test(data.kh_email)) {
lbEmail.innerText = "vui lòng điền đúng định dạng email";
return false;
}
lbEmail.innerText = "";
if (data.kh_namsinh > 2002) {
lbNgaySinh.innerText = "bạn phải đủ 18 tuổi";
return false;
}
lbNgaySinh.innerText = "";
return true;
}
// get thông tin
var getThongTin = function(user) {
document.getElementById("kh_ten").value = user.kh_ten;
document.getElementById("kh_gioitinh").value = user.kh_gioitinh == 0 ? "Nam" : "Nữ";
document.getElementById("kh_diachi").value = user.kh_diachi;
document.getElementById("kh_dienthoai").value = user.kh_dienthoai;
document.getElementById("kh_email").value = user.kh_email;
document.getElementById("kh_ngaysinh").value = user.kh_ngaysinh + "/" + user.kh_thangsinh + "/" + user.kh_namsinh;
}
// phần thanh toán paying.html
var pay = function() {
// lấy sản phẩm từ user ra
var list = document.getElementById("listProductPay");
var product = userCartList(userNow);
var total = 0;
for (var p of product) {
var item = `
<li class="list-group-item d-flex justify-content-between ">
<div>
<h4 class="my-0">${p.name}</h4>
<small class="text-muted">${p.price} x ${p.orderQty} </small>
</div>
<span class="text-muted">${p.orderQty}</span>
</li>
`;
list.innerHTML += item;
total += p.price * p.orderQty;
}
var totalPrice = `
<li class="list-group-item d-flex justify-content-between">
<span>Tổng thành tiền</span>
<strong id="thanhTien">${total}</strong>
</li>
`;
list.innerHTML += totalPrice;
}
// sự kiện ấn vào sản phẩm
var getProductId = function() {
var a = document.getElementsByClassName("sale__item-link");
for (var i = 0; i < a.length; i++) {
a[i].addEventListener("click", function(e) {
e.preventDefault();
var productID = this.id;
window.location = "./pages/products-detail.html?" + productID;
})
}
}
var showDetailProduct = function() {
var linkItem = window.location.href;
var id = linkItem.split("?")[1];
var data = dataProducts.find(function(value, index) {
return value.id == id;
})
var imgProduct = document.querySelector(".product__detail-left");
var linkImg = data.url.split(".jpg")[0];
var imgLink2 = linkImg + ".1.jpg";
var imgLink3 = linkImg + ".2.jpg";
var dataImg = `
<img src="${data.url}" class="product__detail-main w-50" data-bs-toggle="modal" data-bs-target="#imageView"></img>
<div class="product__details row">
<div class="d-flex justify-content-center"g-1 mt-2 row justify-content-center">
<img src="${data.url}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink2}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink3}" style="width:100px" class=" mt-4 product__detail-img"></img>
</div>
</div>
`;
var modalViewImg = document.getElementById("modalViewImg");
modalViewImg.innerHTML = `<img src="${data.url}" class="w-100"></img>`;
imgProduct.id = id;
imgProduct.innerHTML = dataImg;
var name = document.getElementById("name");
var price = document.getElementById("price");
var describe1 = document.getElementById("describe1");
var describe2 = document.getElementById("describe2");
var describe3 = document.getElementById("describe3");
name.innerText = data.name;
price.innerHTML = `Giá: <span class="text-danger">${data.price}<sup>đ</sup></span>`;
describe1.innerText = data.describe1;
describe2.innerText = data.describe2;
describe3.innerText = data.describe3;
}
$(document).ready(function() {
$("#btnAddToCard").click(function() {
$('.toast').toast('show');
})
});
// đếm sản phẩm trên giỏ hàng
var totalProduct = function() {
var totalProduct = document.querySelector(".totalProduct");
var total = userNow.productID.length;
totalProduct.innerText = total;
}
if (userNow)
    totalProduct();
// app.js
// scroll header
function eventScroll() {
window.addEventListener("scroll", function() {
let header = document.querySelector("header");
if (window.scrollY >= 20)
header.classList.add('sticky');
else if (window.scrollY < 19) {
header.classList.remove('sticky');
// header.classList.add('fadeBlock');
}
});
}
// btn nav
var colapse = document.querySelector('.header__btn-colapse');
var btnColapse = document.getElementById("nav-btn-colapse");
var header = document.querySelector('.header__colapse');
var menuOpen = false;
header.addEventListener('click', function(e) {
if (document.querySelector('.header__colapse').contains(e.target)) {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
});
colapse.onclick = function() {
if (!menuOpen) {
btnColapse.classList.add('open');
menuOpen = true;
header.classList.add('hide');
} else {
btnColapse.classList.remove('open');
menuOpen = false;
header.classList.remove('hide');
}
}
$('.owl-carousel').owlCarousel({
loop: true,
margin: 20,
nav: true,
responsiveClass: true,
responsive: {
0: {
items: 1
},
576: {
items: 2,
nav: false
},
768: {
items: 3,
nav: true
},
992: {
items: 4,
nav: true
},
1200: {
items: 6,
nav: true
}
}
})
// điều hướng
// data products 10 sản phẩm thôi
var dataProducts = [{
id: "item-1",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-1.jpg",
price: 149000,
describe1: "Túi Xách Nữ Thời Trang 💖FREESHIP 50k💖 Túi Xách Nữ Đeo Chéo Dây Da Baniclassic Trẻ trung Chất Cực Đẹp TX04",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-2",
name: "(Mẫu HOT) Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
url: "../assets/img/items/item-2.jpg",
price: 249000,
describe1: "TÚI ĐEO CHÉO QUẢNG CHÂU⚜️",
describe2: "Từ những cô nàng bình thường nhất cho tới những ngôi sao hàng đầu, tất cả đều chia sẻ một tình yêu vĩ đại với TÚI XÁCH NỮ ĐẸP của mình TÚI XÁCH NỮ hợp dáng người, hợp màu sắc làm tăng vẻ đẹp của trang phục bạn mặc và khẳng định ấn tượng của bạn trong mắt người đối diện. Tuy nhiên, không phải ai cũng biết chọn một chiếc TÚI XÁCH NỮ DA PU thực sự phù hợp với phom cơ thể của mình. Mang tới cho các cô nàng sự thoải mái khi đi dạo phố hoặc hẹn hò bè bạn vì không phải cầm mang những vật dụng linh tinh,",
describe3: "chiếc TÚI XÁCH NỮ DA ĐẸP đã trở thành người bạn không thể thiếu các nàng. Chúng có sự đa dạng từ kiểu cách tới màu sắc, size…tùy theo nhu cầu của mình mà các nàng lựa chọn một sản phẩm thích hợp. Và nếu bạn cũng đang đi tìm một chiếc ví thể thể hiện được cá tính của bản thân một cách rõ nét nhất và đang... lạc lối, thì hãy cùng khám phá và cảm nhận những nét đẹp và quyến rũ của Túi Xách nữ da lộn mà Túi Xách Nữ ZABUZA cung cấp nhé.",
orderQty: 2
},
{
id: "item-3",
name: "Túi Kẹp Nách Nữ Túi Đeo Chéo Nữ Vintage Hottrend KR 180- 2 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-3.jpg",
price: 349000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù dây xích KR 323- Chất Dù cao cấp, Size 24, 3 màu lựa chọn- KARA 323",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-4",
name: "(Hottrend) Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
url: "../assets/img/items/item-4.jpg",
price: 139000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi kẹp nách nữ vải dù cao cấp KR 274- Chất Dù cao cấp, Size 24, 4 màu lựa chọn- KARA 274",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 4 màu Hot (Trắng , Đen, Xanh Dương, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 3
},
{
id: "item-5",
name: "(HOT) Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
url: "../assets/img/items/item-5.jpg",
price: 259000,
describe1: "KARA Shop xin gửi quý khách sản phẩm HOT: Túi Kẹp Nách Nữ Caro Vintage Hottrend KR 180- 7 Màu Lựa chọn, Chất liệu cao cấp, Có 2 Dây- KARA 180",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cmTúi Kẹp nách nữ có kích thước: Dài 26 cm, Rộng 6 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 7 màu Hot (Trắng Caro, Xanh Caro, Đỏ Caro, Xám Caro, Tím Hồng, Vàng Xanh, Đen Xanh) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 1
},
{
id: "item-6",
name: "[Mã FAMAYWA giảm 10k đơn từ 50k] Túi xách nữ, túi kẹp nách da mềm trơn BH 433",
url: "../assets/img/items/item-6.jpg",
price: 299000,
describe1: "Túi xách là một phụ kiện thời trang không thể thiếu của các bạn trẻ dù là nam hay nữ, nó thể hiện phong cách cũng như cá tính của chủ nhân.",
describe2: "Nếu bạn yêu thích phong cách đơn giản nhưng tinh tế thì chiếc túi xách là một lựa chọn hoàn hảo cho bạn.Chiếc túi xách Tote 6 sở hữu thiết kế thời trang với phần nắp túi cách điệu kết hợp tông màu nổi bật, những đường may tinh tế, cùng gam màu trung tính trẻ trung năng động sẽ vô cùng hữu dụng cho bạn trong việc chứa các vật dụng cá nhân.",
describe3: " Bên cạnh đó, túi xách còn thể hiện gu thời trang và cá tính của bạn.",
orderQty: 2
},
{
id: "item-7",
name: "Túi Cói Merci",
url: "../assets/img/items/item-7.jpg",
price: 599000,
describe1: "Túi Cói Merci - nhỏ nhỏ xinh xinh nhưng tiện vô cùng . Sống ảo cũng xinh lung linhh ✨✨🔥🔥 ",
describe2: "Để mà đi du lịch sống ảo k thể thiếu em túi này lun ý ce ạ 🤩" +
"TÚI CÓI MERCI hot hit 🌴🌴🌴" +
"Túi rộng tha hồ đựng đồ nha ce",
describe3: "Size loại 1: 35x36cm" +
"size loại 2: 32x35cm,đựng vừa A4, vừa laptop, đi học đi làm , du lịch , còn hợp vs ai bỉm sữa mà vẫn muốn trend :))" +
"Túi rất nhẹ gập gọn cất cốp được, sống ảo xịn sò luôn nha 😌😌",
orderQty: 3
},
{
id: "item-8",
name: "TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP SL15",
url: "../assets/img/items/item-8.jpg",
price: 679000,
describe1: "--- TÚI XÁCH ALISA ---" +
" [HÀNG MỚI VỀ] TÚI XÁCH NỮ 2 NGĂN PHỐI NƠ KIỂU DÁNG HÀN QUỐC CỰC ĐẸP" +
"---Đặc Điểm Nổi Bật----" +
" - Trẻ trung phong cách " +
" - Thiết kế mới 2019" +
"- Họa tiết trái tim, thắt nơ siêu xinh",
describe2: "Túi nữ 2 Ngăn Phối Nơ Phiên Bản Hàn Quốc",
describe3: "----Thông Tin Chi Tiết----" +
"- Chất Liệu: Da pu cao cấp mềm mịn" +
"- Màu sắc: , hồng" +
"- Kích thước:19*15*8*15cm" +
"- Phong Cách : Hàn Quốc" +
"- Công dụng:đi chơi , đi làm , đi học , đi du lịch…." +
"-màu sắc: màu hồng" +
"- Mix Đồ: Có Thể kết hợp với mọi trang phục khác nhau",
orderQty: 1
},
{
id: "item-9",
name: "Túi Xách Nữ Tote Da PU Cao Cấp Mềm Đẹp Phom Vuông Kèm Ví Nhỏ Xinh Có Dây Đeo Chéo Style Thời Trang Công Sở Đi Làm Đi Học",
url: "../assets/img/items/item-9.jpg",
price: 238000,
describe1: "Sức nóng của túi tote da chưa bao giờ hạ nhiệt trong giới trẻ sành mốt bởi tính tiện dụng, sang trọng, mà vô cùng cá tính. Combo túi ví tote da Pu dày đẹp với thiết kế tinh tế đem đến phong cách thời trang sành điệu cho các nàng khi đi học, đi làm hay đi chơi.",
describe2: "Túi Kẹp nách nữ có kích thước: Dài 24 cm, Rộng 5 cm, Cao 14 cm",
describe3: "Túi Kẹp nách nữ Với 3 màu Hot (Trắng , Đen, Vàng) được cách điệu cho tạo form dáng hiện đại, trẻ trung",
orderQty: 2
},
{
id: "item-10",
name: "Túi xách tay nữ thương hiệu NEVENKA phong cách trẻ trung thanh lịch N9291",
url: "../assets/img/items/item-10.jpg",
price: 238000,
describe1: "Phong cách: trẻ trung, thanh lịch Kiểu cách: Túi đeo vai, đeo chéo nữ, túi xách tay thời trang Vật liệu chính: Da Pu Vật liệu dây đeo: Dây da PU Bề mặt da: Da trơn",
describe2: "Công nghệ vật liệu: Da nhân tạo Vật liệu lót: PVC Hình dáng: Hình chữ nhật ngang Kích thước: 23 x 16 x 10 cm Kiểu khóa: Khóa kéo miệng túi Màu sắc: Xanh, Trắng , Đen",
describe3: "Thương hiệu: NEVENKA Xuất xứ: Trung Quốc Phù hợp sử dụng: Đi chơi, đi làm, đi dạo phố.....",
orderQty: 2
}
];
// data user
// danh sách giỏ hàng mặc định
danhsachGioHang = [{ id: "item-1", n: 3 },
{ id: "item-2", n: 1 },
{ id: "item-6", n: 2 }
];
var users = [{
username: "admin",
password: "admin",
productID: danhsachGioHang
}]
// data cart
function saveListUser() {
var list = JSON.parse(localStorage.getItem("listUser"));
if (list)
users = list;
}
saveListUser();
function Redirect(url) {
window.location = url;
}
// tạo hàm đăng ký
function checkLogin() {
var user = JSON.parse(localStorage.getItem("userLogin"));
var names = document.querySelectorAll(".user-name");
var logout = document.getElementsByClassName("logout");
var hasUser = document.querySelector('.user');
if (user) {
for (var name of names) {
name.innerHTML = `
<a class="text-danger" href="../pages/login.html">${user.username}</a>
`;
}
if (logout[0].textContent == "Đăng nhập")
logout[0].textContent = "Đăng xuất";
hasUser.classList.add("user-has-account");
return user;
}
logout[0].textContent = "Đăng nhập";
hasUser.classList.remove("user-has-account");
return "";
}
var bool = Boolean(checkLogin());
var userNow = checkLogin();
console.log(bool);
// logout
function Logout() {
var logouts = document.getElementsByClassName("logout");
for (var logout of logouts) {
logout.onclick = () => {
localStorage.removeItem("userLogin");
}
}
}
Logout();
var i = 0;
// thêm sản phẩm
function addRow(product, index) {
var table = document.getElementById("datarow");
var row = `
<tr>
<td class="text-center" >${++i}</td>
<td class="text-center" >
<img src="${product.url}" class="img-product">
</td>
<td class="text-center" >${product.name}</td>
<td class="text-center">${product.price}</td>
<td class="text-center d-flex justify-content-center">
<input style="width: 45px; border: none; outline: none;" type="number"
class="d-block" name="number" id="number" value="${product.orderQty}" onchange ="totalPrice();" min="1">
</td>
<td class="text-center">${product.price * product.orderQty}</td>
<td class="text-center">
<a id="${product.id}" class="btn btn-danger btn-delete-sanpham">
<i class="fa fa-trash" aria-hidden="true"></i> Xóa
</a>
</td>
</tr>
`;
var newRow = table.insertRow(table.length);
newRow.innerHTML = row;
}
// xoa 1 item carrt
var removeByAttr = function(arr, attr, value) {
var i = arr.length;
while (i--) {
if (arr[i] &&
arr[i].hasOwnProperty(attr) &&
(arguments.length > 2 && arr[i][attr] === value)) {
arr.splice(i, 1);
}
}
totalProduct();
return arr;
}
function deleteItemInCart(productID) {
removeByAttr(userNow.productID, "id", productID);
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// khi thay đổi số lượng sản phẩm
function whenChageQty() {
var numbers = document.querySelectorAll("#datarow #number");
var products = userNow.productID;
for (var number in numbers) {
if (numbers.hasOwnProperty(number)) {
products[number].n = numbers[number].value;
// console.log(numbers[number].value);
}
}
var userLogin = userNow;
localStorage.setItem("userLogin", JSON.stringify(userLogin));
}
// tổng giá
var totalPrice = function() {
var table = document.getElementById("datarow");
var deletes = document.querySelectorAll(".btn-delete-sanpham");
var totalPr = 0;
for (var i = 0; i < table.rows.length; ++i) {
var quantity = table.rows[i].cells[4].querySelector("input").value;
var price = table.rows[i].cells[3].innerText;
var total = quantity * price;
table.rows[i].cells[5].innerText = total;
totalPr += total;
deletes[i].onclick = () => {
table.deleteRow(--i);
totalPrice();
deleteItemInCart(deletes[i].id);
}
}
document.getElementById("totalPrice").innerText = totalPr;
return totalPr;
}
// hàm lấy ra sản phẩm từ user
function userCartList(user) {
var products = [];
if (user) {
var danhsachGioHang = user.productID;
for (var item of danhsachGioHang) {
var product = dataProducts.find(function(value) {
return value.id == item.id;
});
product.orderQty = item.n;
products.push(product)
}
}
return products;
}
// add product vào cart
// userCartList(users[0])
var addProduct = function(products) {
var prd = products(checkLogin());
if (prd) {
for (var product of prd) {
addRow(product);
}
totalPrice();
return true;
}
return false;
}
// end them sản phẩm
// tat ca san pham
var pushProduct = function(dataProducts, n) {
var productList = document.getElementById("listProducts");
var products = [];
// in ra ngẫu nhiên số sản phẩm theo n
if (n) {
setTimeout(function() {
for (let i = 0; i < n; ++i) {
let k = Math.floor(Math.random() * 10);
var item = `
<a href="./products-detail.html" id="${dataProducts[k].id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${dataProducts[k].url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${dataProducts[k].name}" class="sale__discript d-block">${dataProducts[k].name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${dataProducts[k].price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.innerHTML = item;
// div.id = dataProducts[k].id;
productList.appendChild(div);
products.push(dataProducts[k]);
}
}, 500);
} else {
// in ra tat cả sản phẩm có trong mảng
for (var product of dataProducts) {
var item = `
<a href="./products-detail.html" id="${product.id}" class="sale__item-link">
<div class="sale__wrap-img">
<img style="width:100%;" src="${product.url}" alt="" class="sale__img">
<span class="sale__view">Xem chi tiết</span>
</div>
<span title="${product.name}" class="sale__discript d-block">${product.name}</span>
<span class="sale__price text-danger d-block"> <sup>₫</sup>${product.price}</span>
</a>
`;
var div = document.createElement("div");
div.classList.add("item", "col-6", "col-sm-6", "col-md-4", "col-lg-3", "col-xl-2", "py-4");
div.id = product.id;
div.innerHTML = item;
productList.appendChild(div);
}
}
return products;
}
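// Usage sketch (assumes the #listProducts container is present on the page):
// pushProduct(dataProducts, 12); // 12 randomly picked cards (duplicates possible), appended after the 500 ms timeout
// pushProduct(dataProducts);     // every product, rendered immediately in array order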
// sự kiện filter
function filter(a, number) {
var btnFs = document.querySelectorAll('.btn-filter');
for (var btn of btnFs) {
if (btn.classList.contains("active")) {
btn.classList.remove("active");
break;
}
}
Redirect('./products.html');
localStorage.setItem("filterActive", number);
}
// tìm kiếm
var btnSearch = document.querySelector(".search__btn");
var inputSearch = document.getElementById("search");
inputSearch.addEventListener("keyup", ({ key }) => {
if (key === "Enter") {
dataSearch();
}
})
function dataSearch() {
var text = document.getElementById("search").value.toLowerCase();
var products = dataProducts.filter(function(product) {
return product.name.toLowerCase().includes(text);
});
localStorage.setItem("filterActive", 4);
localStorage.setItem('searchProducts', JSON.stringify(products));
window.location = "../pages/products.html";
}
btnSearch.addEventListener("click", function(e) {
e.preventDefault();
dataSearch();
});
var btnPro = document.getElementById("btnProduct");
btnPro.addEventListener("click", function(event) {
localStorage.setItem("filterActive", "0");
});
function sortFilter(n) {
if (n == 3) {
dataProducts.sort(function(data1, data2) {
return data1.price - data2.price;
});
pushProduct(dataProducts);
}
if (n == 4) {
var products = JSON.parse(localStorage.getItem("searchProducts"));
pushProduct(products);
} else {
pushProduct(dataProducts, 30);
}
}
// sự kiện khi ấn vào giỏ hàng
var cart = document.querySelector(".cart-link");
cart.addEventListener("click", function(event) {
event.preventDefault();
if (bool) {
Redirect("../pages/cart.html");
} else
alert("vui lòng đăng nhập trước");
});
// đăng ký
function checkRegister() {
var form = document.querySelector('#frmdangky');
var data = Object.fromEntries(new FormData(form).entries());
var regUserName = /(?=.*[a-zA-Z_0-9])\w{6,}/; // ít nhất phải có 6 ký tự không chứa ký tự đặc biệt
var regPassword = /^(?=.*[0-9])(?=.*[a-z])([a-zA-Z0-9]{8,})$/; //phải có 8 ký tự trở lên và có ít nhất 1 số
var regEmail = /^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/;
var regName = /^([A-Z][a-z]+)(\s+[A-Z][a-z]+)+$/; // chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên
var regPhone = /[0-9]{10}/; // số điện thoại phải là số và có 10 chữ số
var lbUserName = document.querySelector("#lbTenDangNhap");
var lbMatKhau = document.querySelector("#lbMatKhau");
var lbNhapLaiMatKhau = document.querySelector("#lbNhapLaiMatKhau");
var lbTen = document.querySelector("#lbTen");
var lbDiaChi = document.querySelector("#lbDiaChi");
var lbDt = document.querySelector("#lbDt");
var lbEmail = document.querySelector("#lbEmail");
var lbNgaySinh = document.querySelector("#lbNgaySinh");
if (!regUserName.test(data.username)) {
lbUserName.innerText = "Tên đăng nhập ít nhất phải có 6 ký tự không chứa ký tự đặc biệt";
return false;
}
lbUserName.innerText = "";
if (!regPassword.test(data.password)) {
lbMatKhau.innerText = "Mật khẩu phải có 8 ký tự trở lên và có ít nhất 1 số";
return false;
}
lbMatKhau.innerText = "";
if (data.password !== data.kh_nhaplaimatkhau) {
lbNhapLaiMatKhau.innerText = "Mật khẩu chưa khớp";
return false;
}
lbNhapLaiMatKhau.innerText = "";
if (!regName.test(data.kh_ten)) {
lbTen.innerText = "Chữ cái đầu tiên phải bắt đầu bằng chữ in hoa và phải có họ và tên";
return false;
}
lbTen.innerText = "";
if (data.kh_diachi.trim().length == 0) {
lbDiaChi.innerText = "Địa chỉ không được bỏ trống";
return false;
}
lbDiaChi.innerText = "";
if (!regPhone.test(data.kh_dienthoai)) {
lbDt.innerText = "số điện thoại phải là số và có 10 chữ số ";
return false;
}
lbDt.innerText = "";
if (!regEmail.test(data.kh_email)) {
lbEmail.innerText = "vui lòng điền đúng định dạng email";
return false;
}
lbEmail.innerText = "";
if (data.kh_namsinh > 2002) {
lbNgaySinh.innerText = "bạn phải đủ 18 tuổi";
return false;
}
lbNgaySinh.innerText = "";
return true;
}
// get thông tin
var getThongTin = function(user) {
document.getElementById("kh_ten").value = user.kh_ten;
document.getElementById("kh_gioitinh").value = user.kh_gioitinh == 0 ? "Nam" : "Nữ";
document.getElementById("kh_diachi").value = user.kh_diachi;
document.getElementById("kh_dienthoai").value = user.kh_dienthoai;
document.getElementById("kh_email").value = user.kh_email;
document.getElementById("kh_ngaysinh").value = user.kh_ngaysinh + "/" + user.kh_thangsinh + "/" + user.kh_namsinh;
}
// phần thanh toán paying.html
var pay = function() {
// lấy sản phẩm từ user ra
var list = document.getElementById("listProductPay");
var product = userCartList(userNow);
var total = 0;
for (var p of product) {
var item = `
<li class="list-group-item d-flex justify-content-between ">
<div>
<h4 class="my-0">${p.name}</h4>
<small class="text-muted">${p.price} x ${p.orderQty} </small>
</div>
<span class="text-muted">${p.orderQty}</span>
</li>
`;
list.innerHTML += item;
total += p.price * p.orderQty;
}
var totalPrice = `
<li class="list-group-item d-flex justify-content-between">
<span>Tổng thành tiền</span>
<strong id="thanhTien">${total}</strong>
</li>
`;
list.innerHTML += totalPrice;
}
// sự kiện ấn vào sản phẩm
var getProductId = function() {
var a = document.getElementsByClassName("sale__item-link");
for (var i = 0; i < a.length; i++) {
a[i].addEventListener("click", function(e) {
e.preventDefault();
var productID = this.id;
window.location = "./pages/products-detail.html?" + productID;
})
}
}
var showDetailProduct = function() {
var linkItem = window.location.href;
var id = linkItem.split("?")[1];
var data = dataProducts.find(function(value, index) {
return value.id == id;
})
var imgProduct = document.querySelector(".product__detail-left");
var linkImg = data.url.split(".jpg")[0];
var imgLink2 = linkImg + ".1.jpg";
var imgLink3 = linkImg + ".2.jpg";
var dataImg = `
<img src="${data.url}" class="product__detail-main w-50" data-bs-toggle="modal" data-bs-target="#imageView"></img>
<div class="product__details row">
<div class="d-flex justify-content-center"g-1 mt-2 row justify-content-center">
<img src="${data.url}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink2}" style="width:100px" class=" mt-4 product__detail-img"></img>
<img src="${imgLink3}" style="width:100px" class=" mt-4 product__detail-img"></img>
</div>
</div>
`;
var modalViewImg = document.getElementById("modalViewImg");
modalViewImg.innerHTML = `<img src="${data.url}" class="w-100"></img>`;
imgProduct.id = id;
imgProduct.innerHTML = dataImg;
var name = document.getElementById("name");
var price = document.getElementById("price");
var describe1 = document.getElementById("describe1");
var describe2 = document.getElementById("describe2");
var describe3 = document.getElementById("describe3");
name.innerText = data.name;
price.innerHTML = `Giá: <span class="text-danger">${data.price}<sup>đ</sup></span>`;
describe1.innerText = data.describe1;
describe2.innerText = data.describe2;
describe3.innerText = data.describe3;
}
$(document).ready(function() {
$("#btnAddToCard").click(function() {
$('.toast').toast('show');
})
});
// đếm sản phẩm trên giỏ hàng
var totalProduct = function() {
var totalProduct = document.querySelector(".totalProduct");
var total = userNow.productID.length;
totalProduct.innerText = total;
}
if (userNow)
    totalProduct();
// x25519.rs
use core::ops::{Deref, DerefMut};
use super::common::*;
use super::error::Error;
use super::field25519::*;
const POINT_BYTES: usize = 32;
/// Non-uniform output of a scalar multiplication.
/// This represents a point on the curve, and should not be used directly as a
/// cipher key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct DHOutput([u8; DHOutput::BYTES]);
impl DHOutput {
pub const BYTES: usize = 32;
}
impl Deref for DHOutput {
type Target = [u8; DHOutput::BYTES];
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DHOutput {
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<DHOutput> for PublicKey {
fn from(dh: DHOutput) -> Self {
PublicKey(dh.0)
}
}
impl From<DHOutput> for SecretKey {
fn from(dh: DHOutput) -> Self {
SecretKey(dh.0)
}
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
    return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
        /// Convert an Ed25519 secret key to an X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
        /// Convert an Ed25519 public key to an X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> {
let pk = PublicKey::from_slice(
&edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?,
)?;
pk.clear_cofactor()?;
Ok(pk)
}
}
impl KeyPair {
        /// Convert an Ed25519 key pair to an X25519 key pair.
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> {
let pk = PublicKey::from_ed25519(&edkp.pk)?;
let sk = SecretKey::from_ed25519(&edkp.sk)?;
Ok(KeyPair { pk, sk })
}
}
}
#[cfg(not(feature = "disable-signatures"))]
pub use from_ed25519::*;
#[test]
fn test_x25519() {
let sk_1 = SecretKey::from_slice(&[
1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
])
.unwrap();
let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap();
assert_eq!(PublicKey::from(output), PublicKey::base_point());
let kp_a = KeyPair::generate();
let kp_b = KeyPair::generate();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[cfg(not(feature = "disable-signatures"))]
#[test]
fn test_x25519_map() {
use super::KeyPair as EdKeyPair;
let edkp_a = EdKeyPair::generate();
let edkp_b = EdKeyPair::generate();
let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap();
let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[test]
#[cfg(all(not(feature = "disable-signatures"), feature = "random"))]
fn test_x25519_invalid_keypair() {
let kp1 = KeyPair::generate();
let kp2 = KeyPair::generate();
assert_eq!(
kp1.sk.validate_public_key(&kp2.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert_eq!(
kp2.sk.validate_public_key(&kp1.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok());
assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok());
assert!(kp1.validate().is_ok());
}
| {
return Err(Error::InvalidSecretKey);
} | conditional_block |
x25519.rs
use core::ops::{Deref, DerefMut};
use super::common::*;
use super::error::Error;
use super::field25519::*;
const POINT_BYTES: usize = 32;
/// Non-uniform output of a scalar multiplication.
/// This represents a point on the curve, and should not be used directly as a
/// cipher key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct DHOutput([u8; DHOutput::BYTES]);
impl DHOutput {
pub const BYTES: usize = 32;
}
impl Deref for DHOutput {
type Target = [u8; DHOutput::BYTES];
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DHOutput {
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<DHOutput> for PublicKey {
    fn from(dh: DHOutput) -> Self {
PublicKey(dh.0)
}
}
impl From<DHOutput> for SecretKey {
fn from(dh: DHOutput) -> Self {
SecretKey(dh.0)
}
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
        /// Convert an Ed25519 secret key to an X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
        /// Convert an Ed25519 public key to an X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> {
let pk = PublicKey::from_slice(
&edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?,
)?;
pk.clear_cofactor()?;
Ok(pk)
}
}
impl KeyPair {
        /// Convert an Ed25519 key pair to an X25519 key pair.
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> {
let pk = PublicKey::from_ed25519(&edkp.pk)?;
let sk = SecretKey::from_ed25519(&edkp.sk)?;
Ok(KeyPair { pk, sk })
}
}
}
#[cfg(not(feature = "disable-signatures"))]
pub use from_ed25519::*;
#[test]
fn test_x25519() {
let sk_1 = SecretKey::from_slice(&[
1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
])
.unwrap();
let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap();
assert_eq!(PublicKey::from(output), PublicKey::base_point());
let kp_a = KeyPair::generate();
let kp_b = KeyPair::generate();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[cfg(not(feature = "disable-signatures"))]
#[test]
fn test_x25519_map() {
use super::KeyPair as EdKeyPair;
let edkp_a = EdKeyPair::generate();
let edkp_b = EdKeyPair::generate();
let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap();
let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[test]
#[cfg(all(not(feature = "disable-signatures"), feature = "random"))]
fn test_x25519_invalid_keypair() {
let kp1 = KeyPair::generate();
let kp2 = KeyPair::generate();
assert_eq!(
kp1.sk.validate_public_key(&kp2.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert_eq!(
kp2.sk.validate_public_key(&kp1.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok());
assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok());
assert!(kp1.validate().is_ok());
}
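// Illustrative sketch (not part of the original module): the `DHOutput`
// documentation above says the raw point coordinates are not uniform and should
// be hashed before use as a cipher key. This sketch assumes the `sha512::Hash::hash`
// helper referenced by the `from_ed25519` module is reachable from the parent
// module and accepts a 32-byte array.
#[cfg(all(not(feature = "disable-signatures"), feature = "random"))]
#[test]
fn test_x25519_hashed_shared_secret_sketch() {
    use super::sha512;
    let kp_a = KeyPair::generate();
    let kp_b = KeyPair::generate();
    // Both parties compute the same curve point...
    let dh_a = kp_b.pk.dh(&kp_a.sk).unwrap();
    let dh_b = kp_a.pk.dh(&kp_b.sk).unwrap();
    // ...and hash it to derive uniform key material.
    let key_a = sha512::Hash::hash(*dh_a);
    let key_b = sha512::Hash::hash(*dh_b);
    assert_eq!(key_a[..32], key_b[..32]);
}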
x25519.rs
use core::ops::{Deref, DerefMut};
use super::common::*;
use super::error::Error;
use super::field25519::*;
const POINT_BYTES: usize = 32;
/// Non-uniform output of a scalar multiplication.
/// This represents a point on the curve, and should not be used directly as a
/// cipher key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct DHOutput([u8; DHOutput::BYTES]);
impl DHOutput {
pub const BYTES: usize = 32;
}
impl Deref for DHOutput {
type Target = [u8; DHOutput::BYTES];
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DHOutput {
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<DHOutput> for PublicKey {
fn from(dh: DHOutput) -> Self {
PublicKey(dh.0)
}
}
impl From<DHOutput> for SecretKey {
fn from(dh: DHOutput) -> Self {
SecretKey(dh.0)
}
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::getrandom(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
        /// Convert an Ed25519 secret key to an X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
        /// Convert an Ed25519 public key to an X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> {
let pk = PublicKey::from_slice(
&edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?,
)?;
pk.clear_cofactor()?;
Ok(pk)
}
}
impl KeyPair {
        /// Convert an Ed25519 key pair to an X25519 key pair.
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> {
let pk = PublicKey::from_ed25519(&edkp.pk)?;
let sk = SecretKey::from_ed25519(&edkp.sk)?;
Ok(KeyPair { pk, sk })
}
}
}
#[cfg(not(feature = "disable-signatures"))]
pub use from_ed25519::*;
#[test]
fn test_x25519() {
    let sk_1 = SecretKey::from_slice(&[
1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
])
.unwrap();
let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap();
assert_eq!(PublicKey::from(output), PublicKey::base_point());
let kp_a = KeyPair::generate();
let kp_b = KeyPair::generate();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[cfg(not(feature = "disable-signatures"))]
#[test]
fn test_x25519_map() {
use super::KeyPair as EdKeyPair;
let edkp_a = EdKeyPair::generate();
let edkp_b = EdKeyPair::generate();
let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap();
let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[test]
#[cfg(all(not(feature = "disable-signatures"), feature = "random"))]
fn test_x25519_invalid_keypair() {
let kp1 = KeyPair::generate();
let kp2 = KeyPair::generate();
assert_eq!(
kp1.sk.validate_public_key(&kp2.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert_eq!(
kp2.sk.validate_public_key(&kp1.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok());
assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok());
assert!(kp1.validate().is_ok());
}
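// Illustrative sketch (not in the original file): recovering a public key from a
// secret key is the same clamped scalar multiplication as a DH step against the
// Curve25519 base point, so the two results must agree.
#[cfg(feature = "random")]
#[test]
fn test_recover_public_key_matches_base_point_dh_sketch() {
    let kp = KeyPair::generate();
    let recovered = kp.sk.recover_public_key().unwrap();
    let via_dh = PublicKey::base_point().dh(&kp.sk).unwrap();
    assert_eq!(PublicKey::from(via_dh), recovered);
}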
nasdaq_itch_vwap.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
# Created By : Karl Thompson
# Created Date: Mon March 25 17:34:00 CDT 2019
# ==============================================================================
"""nasdaq_itch_vwap - Generate a table of running volume-weighted average price
[VWAP] for NASDAQ stocks at trading hours based on Nasdaq TotalView-ITCH 5 data.
Data available at: ftp://emi.nasdaq.com/ITCH/01302019.NASDAQ_ITCH50.gz
If you use this code in your work, please cite the following:
Karl H. Thompson, NASDAQ-ITCH-VWAP, (2019), GitHub repository,
https://github.com/karlhthompson/nasdaq-itch-vwap"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
import struct
import gzip
import csv
# function to parse select messages in ITCH data:
def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
            message = itch_data.read(43)
            if len(message) < 43: break
            un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
            re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
                b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
                b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
            parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
            # filter for data with valid Order Reference Numbers
            # and Buy/Sell Indicators:
            if parsed_t[4] == 0 and parsed_t[5] == b'B':
                # write the parsed message to the csv file:
                sto = parsed_t[7].decode() # stock
                tim = parsed_t[3] # timestamp
                sha = parsed_t[6] # shares
                pri = float(parsed_t[8])/1e4 # price
                pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
                trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
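
# Illustrative sketch (not part of the original script): the helper below builds a
# synthetic 35-byte Add Order body (everything after the 'A' message header) and
# parses it with the same format string used above, to show how the fields map and
# why prices are divided by 1e4 (ITCH prices are fixed-point with four implied
# decimal places). All values here are made up.
def _demo_add_order_parse():
    body = struct.pack('>HH6sQcI8sI',
                       1, 2,                                  # stock locate, tracking number
                       (34_200 * 10**9).to_bytes(6, 'big'),   # timestamp: 09:30 AM in ns
                       42, b'B', 100,                         # order reference, side, shares
                       b'AAPL    ', 1_234_500)                # stock symbol, price (123.45)
    un_pkd = struct.unpack('>4s6sQcI8cI', body)
    price = float(un_pkd[-1]) / 1e4
    assert price == 123.45
    return un_pkd, price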
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_2_df, trade_3_df])
# create a dataframe for hourly running VWAP values:
vwap_df = trade_df.groupby(['Stock']).all().drop(
columns=['Timestamp', 'Shares', 'Price', 'Product'])
# create a list of trading hours in nanoseconds:
hour_list = [3.6e12 * i for i in [9.5, 10, 11, 12, 13, 14, 15, 16]]
# iterate over the trading hours list:
for hour in hour_list:
# extract data for trades that occurred before the specified hour:
trade_df_copy = trade_df[trade_df.Timestamp <= hour]
# group the trade dataframe by stock:
trade_df_groups = trade_df_copy.groupby(['Stock'])
# calculate the mean for all trade data:
trade_df_mean = trade_df_groups.mean(numeric_only=False)
# calculate the VWAP for all stocks:
trade_df_mean['VWAP'] = trade_df_mean['Product']/trade_df_mean['Shares']
# merge the calculated VWAP fields into the VWAP dataframe:
vwap_df = pd.merge(vwap_df,trade_df_mean['VWAP'],on=['Stock'],how='left')
# adjust the column names in the VWAP dataframe:
vwap_df.columns = ['VWAP at 09:30AM','VWAP at 10:00AM','VWAP at 11:00AM',
'VWAP at 12:00PM','VWAP at 01:00PM','VWAP at 02:00PM',
'VWAP at 03:00PM', 'VWAP at 04:00PM']
# save the hourly VWAP table in Excel format:
vwap_df.to_excel("NASDAQ_VWAP_01_30_2019.xlsx")
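
# Illustrative sketch (not part of the original script): the loop above computes the
# running VWAP as mean(Product) / mean(Shares); this is equivalent to the textbook
# definition sum(price * shares) / sum(shares), because the per-group row count
# cancels out.
def _demo_vwap_equivalence():
    trades = pd.DataFrame({'Shares': [100, 50, 200], 'Price': [10.0, 10.5, 9.75]})
    trades['Product'] = trades['Price'] * trades['Shares']
    vwap_from_sums = trades['Product'].sum() / trades['Shares'].sum()
    vwap_from_means = trades['Product'].mean() / trades['Shares'].mean()
    assert abs(vwap_from_sums - vwap_from_means) < 1e-12
    return vwap_from_sums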
if __name__ == '__main__':
# open the ITCH data file:
itch_data = gzip.open('01302019.NASDAQ_ITCH50.gz','rb')
# parse the data:
parse_itch_data(itch_data)
# close the ITCH data file:
itch_data.close()
# calculate the hourly VWAP for all stocks:
calculate_vwap()
nasdaq_itch_vwap.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
# Created By : Karl Thompson
# Created Date: Mon March 25 17:34:00 CDT 2019
# ==============================================================================
"""nasdaq_itch_vwap - Generate a table of running volume-weighted average price
[VWAP] for NASDAQ stocks at trading hours based on Nasdaq TotalView-ITCH 5 data.
Data available at: ftp://emi.nasdaq.com/ITCH/01302019.NASDAQ_ITCH50.gz
If you use this code in your work, please cite the following:
Karl H. Thompson, NASDAQ-ITCH-VWAP, (2019), GitHub repository,
https://github.com/karlhthompson/nasdaq-itch-vwap"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
import struct
import gzip
import csv
# function to parse select messages in ITCH data:
def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
            if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
                parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
                # write the parsed message to the csv file:
                ref = parsed_oewp[4] # order reference number
                sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_2_df, trade_3_df])
# create a dataframe for hourly running VWAP values:
vwap_df = trade_df.groupby(['Stock']).all().drop(
columns=['Timestamp', 'Shares', 'Price', 'Product'])
# create a list of trading hours in nanoseconds:
hour_list = [3.6e12 * i for i in [9.5, 10, 11, 12, 13, 14, 15, 16]]
# iterate over the trading hours list:
for hour in hour_list:
# extract data for trades that occurred before the specified hour:
trade_df_copy = trade_df[trade_df.Timestamp <= hour]
# group the trade dataframe by stock:
trade_df_groups = trade_df_copy.groupby(['Stock'])
# calculate the mean for all trade data:
trade_df_mean = trade_df_groups.mean(numeric_only=False)
# calculate the VWAP for all stocks:
trade_df_mean['VWAP'] = trade_df_mean['Product']/trade_df_mean['Shares']
# merge the calculated VWAP fields into the VWAP dataframe:
vwap_df = pd.merge(vwap_df,trade_df_mean['VWAP'],on=['Stock'],how='left')
# adjust the column names in the VWAP dataframe:
vwap_df.columns = ['VWAP at 09:30AM','VWAP at 10:00AM','VWAP at 11:00AM',
'VWAP at 12:00PM','VWAP at 01:00PM','VWAP at 02:00PM',
'VWAP at 03:00PM', 'VWAP at 04:00PM']
# save the hourly VWAP table in Excel format:
vwap_df.to_excel("NASDAQ_VWAP_01_30_2019.xlsx")
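
# Illustrative sketch (not part of the original script): ITCH timestamps are
# nanoseconds since midnight, so the hourly cutoffs above are expressed directly in
# nanoseconds (3.6e12 ns per hour); filtering on Timestamp <= cutoff keeps every
# trade executed up to that time of day.
def _demo_hour_cutoff():
    half_past_nine_ns = 3.6e12 * 9.5   # 09:30 AM as nanoseconds since midnight
    trades = pd.DataFrame({'Timestamp': [3.40e13, 3.42e13, 3.50e13],
                           'Shares': [10, 20, 30]})
    kept = trades[trades.Timestamp <= half_past_nine_ns]
    assert len(kept) == 2
    return kept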
if __name__ == '__main__':
# open the ITCH data file:
itch_data = gzip.open('01302019.NASDAQ_ITCH50.gz','rb')
# parse the data:
parse_itch_data(itch_data)
# close the ITCH data file:
itch_data.close()
# calculate the hourly VWAP for all stocks:
    calculate_vwap()
nasdaq_itch_vwap.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
# Created By : Karl Thompson
# Created Date: Mon March 25 17:34:00 CDT 2019
# ==============================================================================
"""nasdaq_itch_vwap - Generate a table of running volume-weighted average price
[VWAP] for NASDAQ stocks at trading hours based on Nasdaq TotalView-ITCH 5 data.
Data available at: ftp://emi.nasdaq.com/ITCH/01302019.NASDAQ_ITCH50.gz
If you use this code in your work, please cite the following:
Karl H. Thompson, NASDAQ-ITCH-VWAP, (2019), GitHub repository,
https://github.com/karlhthompson/nasdaq-itch-vwap"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
import struct
import gzip
import csv
# function to parse select messages in ITCH data:
def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
    add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
        names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
    # import the parsed Order Executed data into a Pandas dataframe:
    ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
        names = ['Reference', 'Shares'])
    # import the parsed Order Executed With Price data into a Pandas dataframe:
    ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
        names = ['Reference', 'Shares', 'Price'])
    # import the parsed Trade data into a Pandas dataframe:
    trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
        names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
    # merge the Order Executed data with the Add Order data to extract
    # the executed trades data within:
    trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
    trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
    trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
    trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
    trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
    # merge the Order Executed With Price data with the Add Order data
    # to extract the executed trades data within:
    trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
    trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
    trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
    trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
    trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
    # concatenate all three trade dataframes (trades from Trade messages,
    # trades from Executed Order messages, and trades from Executed Order
    # With Price messages) into a comprehensive dataframe:
    trade_df = pd.concat([trade_1_df, trade_2_df, trade_3_df])
    # create a dataframe for hourly running VWAP values:
    vwap_df = trade_df.groupby(['Stock']).all().drop(
        columns=['Timestamp', 'Shares', 'Price', 'Product'])
    # create a list of trading hours in nanoseconds:
    hour_list = [3.6e12 * i for i in [9.5, 10, 11, 12, 13, 14, 15, 16]]
    # iterate over the trading hours list:
    for hour in hour_list:
        # extract data for trades that occurred before the specified hour:
        trade_df_copy = trade_df[trade_df.Timestamp <= hour]
        # group the trade dataframe by stock:
        trade_df_groups = trade_df_copy.groupby(['Stock'])
        # calculate the mean for all trade data:
        trade_df_mean = trade_df_groups.mean(numeric_only=False)
        # calculate the VWAP for all stocks:
        trade_df_mean['VWAP'] = trade_df_mean['Product']/trade_df_mean['Shares']
        # merge the calculated VWAP fields into the VWAP dataframe:
        vwap_df = pd.merge(vwap_df,trade_df_mean['VWAP'],on=['Stock'],how='left')
    # adjust the column names in the VWAP dataframe:
    vwap_df.columns = ['VWAP at 09:30AM','VWAP at 10:00AM','VWAP at 11:00AM',
                       'VWAP at 12:00PM','VWAP at 01:00PM','VWAP at 02:00PM',
                       'VWAP at 03:00PM', 'VWAP at 04:00PM']
    # save the hourly VWAP table in Excel format:
    vwap_df.to_excel("NASDAQ_VWAP_01_30_2019.xlsx")
if __name__ == '__main__':
# open the ITCH data file:
itch_data = gzip.open('01302019.NASDAQ_ITCH50.gz','rb')
# parse the data:
parse_itch_data(itch_data)
# close the ITCH data file:
itch_data.close()
# calculate the hourly VWAP for all stocks:
calculate_vwap()
nasdaq_itch_vwap.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
# Created By : Karl Thompson
# Created Date: Mon March 25 17:34:00 CDT 2019
# ==============================================================================
"""nasdaq_itch_vwap - Generate a table of running volume-weighted average price
[VWAP] for NASDAQ stocks at trading hours based on Nasdaq TotalView-ITCH 5 data.
Data available at: ftp://emi.nasdaq.com/ITCH/01302019.NASDAQ_ITCH50.gz
If you use this code in your work, please cite the following:
Karl H. Thompson, NASDAQ-ITCH-VWAP, (2019), GitHub repository,
https://github.com/karlhthompson/nasdaq-itch-vwap"""
# ==============================================================================
# Imports
# ==============================================================================
import pandas as pd
import struct
import gzip
import csv
# function to parse select messages in ITCH data:
def parse_itch_data(itch_data):
# read the first byte of each message in the data file:
msg_header = itch_data.read(1)
# initialize the csv file that will store parsed Add Order and Add Order
# with MPID messages:
add_order_data = open('add_order_data.csv','w')
add_order_wrtr = csv.writer(add_order_data)
# initialize the csv file that will store parsed Order Executed messages:
ord_exec_data = open('ord_exec_data.csv','w')
ord_exec_wrtr = csv.writer(ord_exec_data)
# initialize the csv file that will store parsed Order Executed With Price
# messages:
ord_exec_pr_data = open('ord_exec_pr_data.csv','w')
ord_exec_pr_wrtr = csv.writer(ord_exec_pr_data)
# initialize the csv file that will store parsed Trade messages:
trade_data = open('trade_data.csv','w')
trade_wrtr = csv.writer(trade_data)
# iterate over all messages in the data file:
while msg_header:
# process Add Order and Add Order with MPID messages:
if msg_header == b'A' or msg_header == b'F':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQcI8cI',message)
re_pkd = struct.pack('>s4s2s6sQsI8sI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13])
parsed_ao = list(struct.unpack('>sHHQQsI8sI',re_pkd))
# filter for data with valid Buy/Sell Indicators:
if parsed_ao[5] == b'B' or parsed_ao[5] == b'S':
# further filter for data with plausible field values:
if (parsed_ao[4] < 1e14 and parsed_ao[6] < 1e8):
# write the parsed message to the csv file:
try:
sto = parsed_ao[7].decode() # stock
except:
sto = '0' # Write 0 if stock byte decode fails
tim = parsed_ao[3] # timestamp
ref = parsed_ao[4] # order reference number
sha = parsed_ao[6] # shares
pri = float(parsed_ao[8])/1e4 # price
add_order_wrtr.writerow([sto, tim, ref, sha, pri])
# process Order Executed messages:
if msg_header == b'E':
message = itch_data.read(30)
if len(message) < 30: break
un_pkd = struct.unpack('>4s6sQIQ',message)
re_pkd = struct.pack('>s4s2s6sQIQ',msg_header,un_pkd[0],b'\x00\x00',
un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4])
parsed_oe = list(struct.unpack('>sHHQQIQ',re_pkd))
# filter for data with plausible field values:
if (parsed_oe[4] < 1e14 and parsed_oe[5] < 1e8 and parsed_oe[6] < 1e11):
# write the parsed message to the csv file:
ref = parsed_oe[4] # order reference number
sha = parsed_oe[5] # shares
ord_exec_wrtr.writerow([ref, sha])
# process Order Executed With Price messages:
if msg_header == b'C':
message = itch_data.read(35)
if len(message) < 35: break
un_pkd = struct.unpack('>4s6sQIQcI',message)
re_pkd = struct.pack('>s4s2s6sQIQsI',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],un_pkd[5],
un_pkd[6])
parsed_oewp = list(struct.unpack('>sHHQQIQsI',re_pkd))
# filter for data with plausible field values:
if (parsed_oewp[4] < 1e14 and parsed_oewp[5] < 1e6 and
parsed_oewp[6] < 1e10 and parsed_oewp[7] == b'Y'):
# write the parsed message to the csv file:
ref = parsed_oewp[4] # order reference number
sha = parsed_oewp[5] # shares
pri = float(parsed_oewp[8])/1e4 # new price
ord_exec_pr_wrtr.writerow([ref, sha, pri])
# process Trade messages:
if msg_header == b'P':
message = itch_data.read(43)
if len(message) < 43: break
un_pkd = struct.unpack('>4s6sQcI8cIQ',message)
re_pkd = struct.pack('>s4s2s6sQsI8sIQ',msg_header,un_pkd[0],
b'\x00\x00',un_pkd[1],un_pkd[2],un_pkd[3],un_pkd[4],
b''.join(list(un_pkd[5:13])),un_pkd[13],un_pkd[14])
parsed_t = list(struct.unpack('>sHHQQsI8sIQ',re_pkd))
# filter for data with valid Order Reference Numbers
# and Buy/Sell Indicators:
if parsed_t[4] == 0 and parsed_t[5] == b'B':
# write the parsed message to the csv file:
sto = parsed_t[7].decode() # stock
tim = parsed_t[3] # timestamp
sha = parsed_t[6] # shares
pri = float(parsed_t[8])/1e4 # price
pro = parsed_t[6]*float(parsed_t[8])/1e4 # product
trade_wrtr.writerow([sto, tim, sha, pri, pro])
# advance the file position to the next message:
msg_header = itch_data.read(1)
# close the csv files:
add_order_data.close()
ord_exec_data.close()
ord_exec_pr_data.close()
trade_data.close()
# function to calculate the hourly VWAP based on parsed ITCH data:
def calculate_vwap():
# import the parsed Add Order data into a Pandas dataframe:
add_order_df = pd.read_csv('add_order_data.csv', index_col = None,
names = ['Stock', 'Timestamp', 'Reference', 'Shares', 'Price'])
# import the parsed Order Executed data into a Pandas dataframe:
ord_exec_df = pd.read_csv('ord_exec_data.csv', index_col = None,
names = ['Reference', 'Shares'])
# import the parsed Order Executed With Price data into a Pandas dataframe:
ord_exec_pr_df = pd.read_csv('ord_exec_pr_data.csv', index_col = None,
names = ['Reference', 'Shares', 'Price'])
# import the parsed Trade data into a Pandas dataframe:
trade_1_df = pd.read_csv('trade_data.csv', index_col = 0,
names=['Stock', 'Timestamp', 'Shares', 'Price', 'Product'])
# merge the Order Executed data with the Add Order data to extract
# the executed trades data within:
trade_2_df = ord_exec_df.merge(add_order_df,on=['Reference'],how='inner')
trade_2_df = trade_2_df[trade_2_df['Stock']!='0']
trade_2_df = trade_2_df[['Stock', 'Timestamp', 'Shares_x', 'Price']].set_index('Stock')
trade_2_df = trade_2_df.rename(columns={"Shares_x": "Shares"})
trade_2_df['Product'] = trade_2_df['Price']*trade_2_df['Shares']
# merge the Order Executed With Price data with the Add Order data
# to extract the executed trades data within:
trade_3_df = ord_exec_pr_df.merge(add_order_df,on=['Reference'],how='inner')
trade_3_df = trade_3_df[trade_3_df['Stock']!='0']
trade_3_df = trade_3_df[['Stock', 'Timestamp', 'Shares_x', 'Price_x']].set_index('Stock')
trade_3_df = trade_3_df.rename(columns={"Shares_x": "Shares", "Price_x": "Price"})
trade_3_df['Product'] = trade_3_df['Price']*trade_3_df['Shares']
# concatenate all three trade dataframes (trades from Trade messages,
# trades from Executed Order messages, and trades from Executed Order
# With Price messages) into a comprehensive dataframe:
trade_df = pd.concat([trade_1_df, trade_2_df, trade_3_df])
# create a dataframe for hourly running VWAP values:
vwap_df = trade_df.groupby(['Stock']).all().drop(
columns=['Timestamp', 'Shares', 'Price', 'Product'])
# create a list of trading hours in nanoseconds:
hour_list = [3.6e12 * i for i in [9.5, 10, 11, 12, 13, 14, 15, 16]]
# iterate over the trading hours list:
for hour in hour_list:
# extract data for trades that occurred before the specified hour:
trade_df_copy = trade_df[trade_df.Timestamp <= hour]
# group the trade dataframe by stock:
trade_df_groups = trade_df_copy.groupby(['Stock'])
# calculate the mean for all trade data:
trade_df_mean = trade_df_groups.mean(numeric_only=False)
# calculate the VWAP for all stocks:
trade_df_mean['VWAP'] = trade_df_mean['Product']/trade_df_mean['Shares']
# merge the calculated VWAP fields into the VWAP dataframe:
vwap_df = pd.merge(vwap_df,trade_df_mean['VWAP'],on=['Stock'],how='left')
# adjust the column names in the VWAP dataframe:
vwap_df.columns = ['VWAP at 09:30AM','VWAP at 10:00AM','VWAP at 11:00AM',
'VWAP at 12:00PM','VWAP at 01:00PM','VWAP at 02:00PM',
'VWAP at 03:00PM', 'VWAP at 04:00PM']
# save the hourly VWAP table in Excel format:
vwap_df.to_excel("NASDAQ_VWAP_01_30_2019.xlsx")
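
# Illustrative sketch (not part of the original script): Order Executed messages
# carry only a reference number and a quantity; merging them with the Add Order
# table on 'Reference' (as done above) recovers the stock and the resting order's
# price for each fill. Columns with the '_x' suffix come from the left (execution)
# side of the merge.
def _demo_reference_merge():
    add_orders = pd.DataFrame({'Stock': ['MSFT'], 'Timestamp': [3.5e13],
                               'Reference': [42], 'Shares': [500], 'Price': [250.0]})
    executions = pd.DataFrame({'Reference': [42], 'Shares': [100]})
    fills = executions.merge(add_orders, on=['Reference'], how='inner')
    assert fills.loc[0, 'Shares_x'] == 100 and fills.loc[0, 'Price'] == 250.0
    return fills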
if __name__ == '__main__':
# open the ITCH data file:
itch_data = gzip.open('01302019.NASDAQ_ITCH50.gz','rb')
# parse the data:
parse_itch_data(itch_data)
# close the ITCH data file:
itch_data.close()
# calculate the hourly VWAP for all stocks:
calculate_vwap()
physically_monotonic.rs
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Implementation of [crate::plan::interpret::Interpreter] for inference
//! of physical monotonicity in single-time dataflows.
use std::cmp::Reverse;
use std::collections::BTreeSet;
use std::marker::PhantomData;
use differential_dataflow::lattice::Lattice;
use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc};
use mz_repr::{Diff, GlobalId, Row};
use timely::PartialOrder;
use crate::plan::interpret::{BoundedLattice, Context, Interpreter};
use crate::plan::join::JoinPlan;
use crate::plan::reduce::{KeyValPlan, ReducePlan};
use crate::plan::threshold::ThresholdPlan;
use crate::plan::top_k::TopKPlan;
use crate::plan::{AvailableCollections, GetPlan};
/// Represents a boolean physical monotonicity property, where the bottom value
/// is true (i.e., physically monotonic) and the top value is false (i.e. not
/// physically monotonic).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct PhysicallyMonotonic(pub bool);
impl BoundedLattice for PhysicallyMonotonic {
fn top() -> Self {
PhysicallyMonotonic(false)
}
fn bottom() -> Self {
PhysicallyMonotonic(true)
}
}
impl Lattice for PhysicallyMonotonic {
fn join(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 && other.0)
}
fn meet(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 || other.0)
}
}
impl PartialOrder for PhysicallyMonotonic {
fn less_equal(&self, other: &Self) -> bool {
// We employ `Reverse` ordering for `bool` here to be consistent with
// the choice of `top()` being false and `bottom()` being true.
Reverse::<bool>(self.0) <= Reverse::<bool>(other.0)
}
}
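// Worked example of the convention above (illustrative, two-element lattice):
//   bottom() = PhysicallyMonotonic(true), top() = PhysicallyMonotonic(false)
//   bottom().join(&top()) == PhysicallyMonotonic(true && false) == top()
//   bottom().meet(&top()) == PhysicallyMonotonic(true || false) == bottom()
//   bottom().less_equal(&top()) == (Reverse(true) <= Reverse(false)) == true
// so joins move toward "not physically monotonic" and meets toward
// "physically monotonic", consistent with the `Reverse<bool>` ordering.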
/// Provides a concrete implementation of an interpreter that determines if
/// the output of `Plan` expressions is physically monotonic in a single-time
/// dataflow, potentially taking into account judgments about its inputs. We
/// note that in a single-time dataflow, expressions in non-recursive contexts
/// (i.e., outside of `LetRec` values) process streams that are at a minimum
/// logically monotonic, i.e., may contain retractions but would cease to do
/// so if consolidated. Detecting physical monotonicity, i.e., the absence
/// of retractions in a stream, enables us to disable forced consolidation
/// whenever possible.
#[derive(Debug)]
pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> {
monotonic_ids: &'a BTreeSet<GlobalId>,
_phantom: PhantomData<T>,
}
impl<'a, T> SingleTimeMonotonic<'a, T> {
/// Instantiates an interpreter for single-time physical monotonicity
/// analysis.
pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self {
SingleTimeMonotonic {
monotonic_ids,
_phantom: Default::default(),
}
}
}
impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> {
type Domain = PhysicallyMonotonic;
fn constant(
&self,
_ctx: &Context<Self::Domain>,
rows: &Result<Vec<(Row, T, Diff)>, EvalError>,
) -> Self::Domain {
// A constant is physically monotonic iff the constant is an `EvalError`
// or all its rows have `Diff` values greater than zero.
PhysicallyMonotonic(
rows.as_ref()
.map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)),
)
}
fn | (
&self,
ctx: &Context<Self::Domain>,
id: &Id,
_keys: &AvailableCollections,
_plan: &GetPlan,
) -> Self::Domain {
// A get operator yields physically monotonic output iff the corresponding
// `Plan::Get` is on a local or global ID that is known to provide physically
        // monotonic input. The way this becomes known is through the interpreter itself
// for non-recursive local IDs or through configuration for the global IDs of
// monotonic sources and indexes. Recursive local IDs are always assumed to
// break physical monotonicity.
// TODO(vmarcos): Consider in the future if we can ascertain whether the
// restrictions on recursive local IDs can be relaxed to take into account only
// the interpreter judgement directly.
PhysicallyMonotonic(match id {
Id::Local(id) => ctx
.bindings
.get(id)
.map_or(false, |entry| !entry.is_rec && entry.value.0),
Id::Global(id) => self.monotonic_ids.contains(id),
})
}
fn mfp(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_mfp: &MapFilterProject,
_input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn flat_map(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_func: &TableFunc,
_exprs: &Vec<MirScalarExpr>,
_mfp: &MapFilterProject,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn join(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_plan: &JoinPlan,
) -> Self::Domain {
// When we see a join, we must consider that the inputs could have
// been `Plan::Get`s on arrangements. These are not in general safe
// wrt. producing physically monotonic data. So here, we conservatively
// judge that output of a join to be physically monotonic iff all
// inputs are physically monotonic.
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn reduce(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_key_val_plan: &KeyValPlan,
_plan: &ReducePlan,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a recursive context, reduce will advance across timestamps
// and may need to retract. Outside of a recursive context, the
// fact that the dataflow is single-time implies no retraction
// is emitted out of reduce. This makes the output be physically
// monotonic, regardless of the input judgment. All `ReducePlan`
// variants behave the same in this respect.
PhysicallyMonotonic(!ctx.is_rec)
}
fn top_k(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_top_k_plan: &TopKPlan,
) -> Self::Domain {
// Top-k behaves like a reduction, producing physically monotonic
// output when exposed to a single time (i.e., when the context is
// non-recursive). Note that even a monotonic top-k will consolidate
// if necessary to ensure this property.
PhysicallyMonotonic(!ctx.is_rec)
}
fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain {
// Negation produces retractions, so it breaks physical monotonicity.
PhysicallyMonotonic(false)
}
fn threshold(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_threshold_plan: &ThresholdPlan,
) -> Self::Domain {
// Thresholding is a special kind of reduction, so the judgment
// here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec)
}
fn union(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_consolidate_output: bool,
) -> Self::Domain {
        // Union just concatenates the inputs, so it is physically monotonic iff
// all inputs are physically monotonic.
// (Even when we do consolidation, we can't be certain that a negative diff from an input
// is actually cancelled out. For example, Union outputs negative diffs when it's part of
// the EXCEPT pattern.)
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn arrange_by(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_forms: &AvailableCollections,
_input_key: &Option<Vec<MirScalarExpr>>,
_input_mfp: &MapFilterProject,
) -> Self::Domain {
// `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it
// makes sure that the requested `forms` are present and builds them only
// if not already available. Many `forms` may be requested, as the downstream
// consumers of this operator may be many different ones (as we support plan graphs,
// not only trees). The `forms` include arrangements, but also just the collection
// in `raw` form. So for example, if the input is arranged, then `ArrangeBy` could
// be used to request a collection instead. `ArrangeBy` will only build an arrangement
// from scratch when the input is not already arranged in a requested `form`. In our
// physical monotonicity analysis, we presently cannot assert whether only arrangements
// that `ArrangeBy` built will be used by downstream consumers, or if other `forms` that
// do not preserve physical monotonicity would be accessed instead. So we conservatively
// return the physical monotonicity judgment made for the input.
// TODO(vmarcos): Consider in the future enriching the analysis to track physical
// monotonicity not by the output of an operator, but by `forms` made available for each
// collection. With this information, we could eventually make more refined judgements
// at the points of use.
input
}
}
| get | identifier_name |
physically_monotonic.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Implementation of [crate::plan::interpret::Interpreter] for inference
//! of physical monotonicity in single-time dataflows.
use std::cmp::Reverse;
use std::collections::BTreeSet;
use std::marker::PhantomData;
use differential_dataflow::lattice::Lattice;
use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc};
use mz_repr::{Diff, GlobalId, Row};
use timely::PartialOrder;
use crate::plan::interpret::{BoundedLattice, Context, Interpreter};
use crate::plan::join::JoinPlan;
use crate::plan::reduce::{KeyValPlan, ReducePlan};
use crate::plan::threshold::ThresholdPlan;
use crate::plan::top_k::TopKPlan;
use crate::plan::{AvailableCollections, GetPlan};
/// Represents a boolean physical monotonicity property, where the bottom value
/// is true (i.e., physically monotonic) and the top value is false (i.e. not
/// physically monotonic).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct PhysicallyMonotonic(pub bool);
impl BoundedLattice for PhysicallyMonotonic {
fn top() -> Self {
PhysicallyMonotonic(false)
}
fn bottom() -> Self {
PhysicallyMonotonic(true)
}
}
impl Lattice for PhysicallyMonotonic {
fn join(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 && other.0)
}
fn meet(&self, other: &Self) -> Self {
PhysicallyMonotonic(self.0 || other.0)
}
}
impl PartialOrder for PhysicallyMonotonic {
fn less_equal(&self, other: &Self) -> bool {
// We employ `Reverse` ordering for `bool` here to be consistent with
// the choice of `top()` being false and `bottom()` being true.
Reverse::<bool>(self.0) <= Reverse::<bool>(other.0)
}
}
/// Provides a concrete implementation of an interpreter that determines if
/// the output of `Plan` expressions is physically monotonic in a single-time
/// dataflow, potentially taking into account judgments about its inputs. We
/// note that in a single-time dataflow, expressions in non-recursive contexts
/// (i.e., outside of `LetRec` values) process streams that are at a minimum
/// logically monotonic, i.e., may contain retractions but would cease to do
/// so if consolidated. Detecting physical monotonicity, i.e., the absence
/// of retractions in a stream, enables us to disable forced consolidation
/// whenever possible.
#[derive(Debug)]
pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> {
monotonic_ids: &'a BTreeSet<GlobalId>,
_phantom: PhantomData<T>,
}
impl<'a, T> SingleTimeMonotonic<'a, T> {
/// Instantiates an interpreter for single-time physical monotonicity
/// analysis.
pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self {
SingleTimeMonotonic {
monotonic_ids,
_phantom: Default::default(),
}
}
}
impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> {
type Domain = PhysicallyMonotonic;
fn constant(
&self,
_ctx: &Context<Self::Domain>,
rows: &Result<Vec<(Row, T, Diff)>, EvalError>,
) -> Self::Domain {
// A constant is physically monotonic iff the constant is an `EvalError`
// or all its rows have `Diff` values greater than zero.
PhysicallyMonotonic(
rows.as_ref()
.map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)),
)
}
fn get(
&self,
ctx: &Context<Self::Domain>,
id: &Id,
_keys: &AvailableCollections,
_plan: &GetPlan,
) -> Self::Domain {
// A get operator yields physically monotonic output iff the corresponding
// `Plan::Get` is on a local or global ID that is known to provide physically
        // monotonic input. The way this becomes known is through the interpreter itself
// for non-recursive local IDs or through configuration for the global IDs of
// monotonic sources and indexes. Recursive local IDs are always assumed to
// break physical monotonicity.
// TODO(vmarcos): Consider in the future if we can ascertain whether the
// restrictions on recursive local IDs can be relaxed to take into account only
// the interpreter judgement directly.
PhysicallyMonotonic(match id {
Id::Local(id) => ctx
.bindings
.get(id)
.map_or(false, |entry| !entry.is_rec && entry.value.0),
Id::Global(id) => self.monotonic_ids.contains(id),
})
}
fn mfp(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_mfp: &MapFilterProject,
_input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn flat_map(
&self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_func: &TableFunc,
_exprs: &Vec<MirScalarExpr>,
_mfp: &MapFilterProject,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a single-time context, we just propagate the monotonicity
// status of the input
input
}
fn join(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_plan: &JoinPlan,
) -> Self::Domain {
// When we see a join, we must consider that the inputs could have
// been `Plan::Get`s on arrangements. These are not in general safe
// wrt. producing physically monotonic data. So here, we conservatively
// judge that output of a join to be physically monotonic iff all
// inputs are physically monotonic.
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
fn reduce(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_key_val_plan: &KeyValPlan,
_plan: &ReducePlan,
_input_key: &Option<Vec<MirScalarExpr>>,
) -> Self::Domain {
// In a recursive context, reduce will advance across timestamps
// and may need to retract. Outside of a recursive context, the
// fact that the dataflow is single-time implies no retraction
// is emitted out of reduce. This makes the output be physically
// monotonic, regardless of the input judgment. All `ReducePlan`
// variants behave the same in this respect.
PhysicallyMonotonic(!ctx.is_rec)
}
fn top_k(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_top_k_plan: &TopKPlan,
) -> Self::Domain {
// Top-k behaves like a reduction, producing physically monotonic
// output when exposed to a single time (i.e., when the context is
// non-recursive). Note that even a monotonic top-k will consolidate
// if necessary to ensure this property.
PhysicallyMonotonic(!ctx.is_rec)
}
fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain {
// Negation produces retractions, so it breaks physical monotonicity.
PhysicallyMonotonic(false)
}
fn threshold(
&self,
ctx: &Context<Self::Domain>,
_input: Self::Domain,
_threshold_plan: &ThresholdPlan,
) -> Self::Domain {
// Thresholding is a special kind of reduction, so the judgment
// here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec)
}
fn union(
&self,
_ctx: &Context<Self::Domain>,
inputs: Vec<Self::Domain>,
_consolidate_output: bool,
) -> Self::Domain {
        // Union just concatenates the inputs, so it is physically monotonic iff
// all inputs are physically monotonic.
// (Even when we do consolidation, we can't be certain that a negative diff from an input
// is actually cancelled out. For example, Union outputs negative diffs when it's part of
// the EXCEPT pattern.)
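        // (Illustrative: for `A EXCEPT B`, the plan unions A with a negation of B;
        // if B holds a row absent from A, the union's consolidated output still
        // carries that row with a negative diff, which only the later Threshold
        // removes, so consolidation alone cannot restore physical monotonicity.)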
PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0))
}
| &self,
_ctx: &Context<Self::Domain>,
input: Self::Domain,
_forms: &AvailableCollections,
_input_key: &Option<Vec<MirScalarExpr>>,
_input_mfp: &MapFilterProject,
) -> Self::Domain {
// `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it
// makes sure that the requested `forms` are present and builds them only
// if not already available. Many `forms` may be requested, as the downstream
// consumers of this operator may be many different ones (as we support plan graphs,
// not only trees). The `forms` include arrangements, but also just the collection
// in `raw` form. So for example, if the input is arranged, then `ArrangeBy` could
// be used to request a collection instead. `ArrangeBy` will only build an arrangement
// from scratch when the input is not already arranged in a requested `form`. In our
// physical monotonicity analysis, we presently cannot assert whether only arrangements
// that `ArrangeBy` built will be used by downstream consumers, or if other `forms` that
// do not preserve physical monotonicity would be accessed instead. So we conservatively
// return the physical monotonicity judgment made for the input.
// TODO(vmarcos): Consider in the future enriching the analysis to track physical
// monotonicity not by the output of an operator, but by `forms` made available for each
// collection. With this information, we could eventually make more refined judgements
// at the points of use.
input
}
} | fn arrange_by( | random_line_split |
spinning_table_states.py | PKG = "spinning_table_sm"
import roslib; roslib.load_manifest(PKG)
import rospy
import smach
import sensor_msgs.msg
import trajectory_msgs.msg as tm
import geometry_msgs.msg as gm
import smach_ros
from numpy import *
from numpy.linalg import norm
from collections import defaultdict
from misc_msgs.msg import *
from rotating_grasper.msg import *
from rotating_grasper.srv import *
from geometry_msgs.msg import Point,PointStamped,PolygonStamped
import numpy as np
import sys, os, yaml, subprocess
import math  # needed for math.pi / math.atan2 / math.cos used below
import rospkg
DIR = roslib.packages.get_pkg_dir(PKG, required=True) + "/config/"
stream = file(DIR+"config.yaml")
config = yaml.load(stream)
spinning_table_radius = 0.5 #TODO: measure
plate_hang_buffer = 0.03
def make_fuerte_env():
versionstr = sys.version[:3]
return dict(
ROS_MASTER_URI = os.environ["ROS_MASTER_URI"],
PATH = "/opt/ros/fuerte/bin:%s"%os.environ["PATH"],
ROS_VERSION = "fuerte",
PYTHONPATH = "/opt/ros/fuerte/lib/python%s/dist-packages"%versionstr,
ROS_PACKAGE_PATH = "/opt/ros/fuerte/share:/opt/ros/fuerte/stacks")
def make_tracker():
rp = rospkg.RosPack()
pkg_path = rp.get_path("spinning_tabletop_detection")
p = subprocess.Popen(["%s/bin/test_tracker_ros"%pkg_path
,"input_cloud:=/camera/rgb/points"
,"--min_height=%s"%config["min_filter_height"]
,"--max_height=%s"%config["max_filter_height"]
,"--above_table_cutoff=%s"%config["above_table_cutoff"]
], env = make_fuerte_env(), stdout = open('/dev/null','w'))
return p
def smaller_ang(x):
return (x + pi)%(2*pi) - pi
def closer_ang(x,a,dir=0):
"""
find angle y (==x mod 2*pi) that is close to a
dir == 0: minimize absolute value of difference
dir == 1: y > x
    dir == -1: y < x
"""
if dir == 0:
return a + smaller_ang(x-a)
elif dir == 1:
return a + (x-a)%(2*pi)
elif dir == -1:
return a + (x-a)%(2*pi) - 2*pi
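# Examples (illustrative): closer_ang(0.1 + 2*pi, 0) returns 0.1, and
# closer_ang(0.1, 0, dir=-1) returns 0.1 - 2*pi; closer_joint_angles below relies
# on this to keep the forearm-roll and wrist-roll joints near their seed values.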
def closer_joint_angles(pos,seed):
print "pos",pos
print "seed",seed
result = np.array(pos)
for i in [-1,-3]:
result[i] = closer_ang(pos[i],seed[i],0)
print "result",result
return result
class TopicListener(object):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
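        # The inf-norm picks the joint that needs the longest time at its velocity
        # limit; e.g. (illustrative) deltas of [0.2, 0.4] rad with limits
        # [1.0, 0.5] rad/s give duration = max(0.2, 0.8) = 0.8 s.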
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
| self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cylinders[i][0].header.stamp.to_sec() - userdata.cylinders[0][0].header.stamp.to_sec()
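        # Least-squares circle fit: (x - xc)^2 + (y - yc)^2 = r^2 expands to
        #   x^2 + y^2 + a0*x + a1*y + a2 = 0,
        # with a0 = -2*xc, a1 = -2*yc, a2 = xc^2 + yc^2 - r^2. Stacking one such
        # equation per detection gives the linear system [x y 1][a0 a1 a2]^T = -(x^2 + y^2)
        # solved below, from which xc = -a0/2, yc = -a1/2 and
        # r = sqrt((a0^2 + a1^2)/4 - a2) are recovered.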
A = hstack([x, y, mat(ones(x.shape))])
b = -(power(x,2)+power(y,2))
a = asarray(linalg.lstsq(A,b)[0])
xc = -.5 * a[0];
yc = -.5 * a[1];
zc = mean(z)
center_radius = sqrt((a[0]**2+a[1]**2)/4-a[2])
object_radius = mean(r)
object_height = mean(h)
R = center_radius + object_radius
middle_ind = round(len(userdata.cylinders)/2.)
print "len %d middle ind %d" % (len(userdata.cylinders),middle_ind)
middle_angle = math.atan2(y[middle_ind,0]-yc,x[middle_ind,0]-xc)
angles = mat(ones((len(userdata.cylinders),1)))
print x.shape, y.shape, len(userdata.cylinders)
for i in range(min([len(userdata.cylinders), len(x), len(y)])):
angles[i,0] = fix_angle(math.atan2(y[i,0]-yc,x[i,0]-xc),middle_angle)
# prev_angle = angles[0,0]
# for i in range(len(userdata.cylinders)):
# while angles[i,0] < prev_angle:
# angles[i,0] = angles[i,0] + 2*math.pi
# prev_angle = angles[i,0]
A_angles = hstack([times_mat,mat(ones(angles.shape))])
#print hstack([A_angles,angles])
w_result = asarray(linalg.lstsq(A_angles,angles)[0])
w = -w_result[0]
print 'rotation rate: %.3f rad/s - one revolution in %.2f sec' % (w,2*math.pi/w)
#w = 2 * math.pi / 30.
userdata.center = Point(xc,yc,zc)
userdata.radius = R
userdata.object_radius = object_radius
userdata.object_height = object_height
userdata.rotation_rate = w
userdata.init_angle = math.atan2(y[0,0]-yc,x[0,0]-xc)
userdata.init_time = times[0]
polygon_pub = rospy.Publisher('/fit_circle', geometry_msgs.msg.PolygonStamped)
polygon1 = PolygonStamped()
polygon1.header.stamp = rospy.Time.now()
polygon1.header.frame_id = 'base_footprint'
polygon2 = PolygonStamped()
polygon2.header.stamp = rospy.Time.now()
polygon2.header.frame_id = 'base_footprint'
        for angle in arange(0,2*math.pi,math.pi/8.):
            pt1 = Point(xc+center_radius*math.cos(angle),yc+center_radius*math.sin(angle),zc)
            pt2 = Point(xc+R*math.cos(angle),yc+R*math.sin(angle),zc)
polygon1.polygon.points.append(pt1)
polygon2.polygon.points.append(pt2)
polygon_pub.publish(polygon1)
polygon_pub.publish(polygon2)
print 'got center (%.3f,%.3f,%.3f), radius %.3f + %.3f = %.3f' % (xc,yc,zc,center_radius,object_radius,R)
return "success"
class ExecuteGrasp(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure","missed"],input_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius","object_height"])
self.n_consecutive_grasp_failures = 0
def execute(self, userdata):
center = userdata.center
radius = userdata.radius
object_radius = userdata.object_radius
rotation_rate = userdata.rotation_rate
init_angle = userdata.init_angle
init_time = userdata.init_time
command = RotatingGrasp()
command.header.stamp = userdata.init_time
command.header.frame_id = 'base_footprint'
command.center = Point()
command.center.x = center.x
command.center.y = center.y
command.center.z = center.z
command.initial = Point()
command.initial.x = center.x + math.cos(init_angle)*radius
command.initial.y = center.y - math.sin(init_angle)*radius
command.initial.z = center.z
command.object_height = userdata.object_height+config["above_table_cutoff"]
command.object_radius = userdata.object_radius
command.rotation_rate = rotation_rate
print "radius,height",userdata.object_radius, userdata.object_height
if userdata.object_radius < .06:
print "CUP!"
object_type = "cup"
command.outward_angle = 0
elif userdata.object_radius < .12:
print "BOWL"
object_type = "bowl"
command.outward_angle = math.pi/4
else:
print "PLATE"
object_type = "plate"
command.outward_angle = math.pi/2 - math.pi/8
print 'waiting for service'
rospy.wait_for_service('rotating_grasper')
server = rospy.ServiceProxy('rotating_grasper', RotatingGrasper)
if object_type == "plate" and radius < spinning_table_radius + plate_hang_buffer:
print "plate must be pushed out"
dist_to_push = spinning_table_radius + plate_hang_buffer - radius
#TODO:
pass
try:
print 'calling...'
print command
success = True
response = server(command)
server_success = response.success
if not server_success:
print "Service call failed"
return "failure"
msg = rospy.wait_for_message('/joint_states',sensor_msgs.msg.JointState)
gripper_joint_angle = msg.position[msg.name.index("r_gripper_joint")]
grab_success = (gripper_joint_angle > .002)
if not grab_success:
print "missed!"
if self.n_consecutive_grasp_failures >= 3:
print "too many failures. exiting"
return "failure"
else:
print "failed %i times. trying again..."%self.n_consecutive_grasp_failures
self.n_consecutive_grasp_failures += 1
return "missed"
else:
self.n_consecutive_grasp_failures = 0
return "success"
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return "failure" | self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
| random_line_split |
spinning_table_states.py | PKG = "spinning_table_sm"
import roslib; roslib.load_manifest(PKG)
import rospy
import smach
import sensor_msgs.msg
import trajectory_msgs.msg as tm
import geometry_msgs.msg as gm
import smach_ros
from numpy import *
from numpy.linalg import norm
from collections import defaultdict
from misc_msgs.msg import *
from rotating_grasper.msg import *
from rotating_grasper.srv import *
from geometry_msgs.msg import Point,PointStamped,PolygonStamped
import numpy as np
import sys, os, yaml, subprocess
import math  # needed for math.pi / math.atan2 / math.cos used below
import rospkg
DIR = roslib.packages.get_pkg_dir(PKG, required=True) + "/config/"
stream = file(DIR+"config.yaml")
config = yaml.load(stream)
spinning_table_radius = 0.5 #TODO: measure
plate_hang_buffer = 0.03
def make_fuerte_env():
versionstr = sys.version[:3]
return dict(
ROS_MASTER_URI = os.environ["ROS_MASTER_URI"],
PATH = "/opt/ros/fuerte/bin:%s"%os.environ["PATH"],
ROS_VERSION = "fuerte",
PYTHONPATH = "/opt/ros/fuerte/lib/python%s/dist-packages"%versionstr,
ROS_PACKAGE_PATH = "/opt/ros/fuerte/share:/opt/ros/fuerte/stacks")
def make_tracker():
rp = rospkg.RosPack()
pkg_path = rp.get_path("spinning_tabletop_detection")
p = subprocess.Popen(["%s/bin/test_tracker_ros"%pkg_path
,"input_cloud:=/camera/rgb/points"
,"--min_height=%s"%config["min_filter_height"]
,"--max_height=%s"%config["max_filter_height"]
,"--above_table_cutoff=%s"%config["above_table_cutoff"]
], env = make_fuerte_env(), stdout = open('/dev/null','w'))
return p
def smaller_ang(x):
return (x + pi)%(2*pi) - pi
def closer_ang(x,a,dir=0):
"""
find angle y (==x mod 2*pi) that is close to a
dir == 0: minimize absolute value of difference
dir == 1: y > x
dir == 2: y < x
"""
if dir == 0:
return a + smaller_ang(x-a)
elif dir == 1:
return a + (x-a)%(2*pi)
elif dir == -1:
return a + (x-a)%(2*pi) - 2*pi
def closer_joint_angles(pos,seed):
print "pos",pos
print "seed",seed
result = np.array(pos)
for i in [-1,-3]:
result[i] = closer_ang(pos[i],seed[i],0)
print "result",result
return result
class TopicListener(object):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
|
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cylinders[i][0].header.stamp.to_sec() - userdata.cylinders[0][0].header.stamp.to_sec()
A = hstack([x, y, mat(ones(x.shape))])
b = -(power(x,2)+power(y,2))
a = asarray(linalg.lstsq(A,b)[0])
xc = -.5 * a[0];
yc = -.5 * a[1];
zc = mean(z)
center_radius = sqrt((a[0]**2+a[1]**2)/4-a[2])
object_radius = mean(r)
object_height = mean(h)
R = center_radius + object_radius
middle_ind = round(len(userdata.cylinders)/2.)
print "len %d middle ind %d" % (len(userdata.cylinders),middle_ind)
middle_angle = math.atan2(y[middle_ind,0]-yc,x[middle_ind,0]-xc)
angles = mat(ones((len(userdata.cylinders),1)))
print x.shape, y.shape, len(userdata.cylinders)
for i in range(min([len(userdata.cylinders), len(x), len(y)])):
angles[i,0] = fix_angle(math.atan2(y[i,0]-yc,x[i,0]-xc),middle_angle)
# prev_angle = angles[0,0]
# for i in range(len(userdata.cylinders)):
# while angles[i,0] < prev_angle:
# angles[i,0] = angles[i,0] + 2*math.pi
# prev_angle = angles[i,0]
A_angles = hstack([times_mat,mat(ones(angles.shape))])
#print hstack([A_angles,angles])
w_result = asarray(linalg.lstsq(A_angles,angles)[0])
w = -w_result[0]
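        # The fit above models angle(t) ~= slope*t + offset; the slope is negated
        # here, presumably because the detected angles decrease over time for the
        # table's spin direction, so that w is reported as a positive rate.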
print 'rotation rate: %.3f rad/s - one revolution in %.2f sec' % (w,2*math.pi/w)
#w = 2 * math.pi / 30.
userdata.center = Point(xc,yc,zc)
userdata.radius = R
userdata.object_radius = object_radius
userdata.object_height = object_height
userdata.rotation_rate = w
userdata.init_angle = math.atan2(y[0,0]-yc,x[0,0]-xc)
userdata.init_time = times[0]
polygon_pub = rospy.Publisher('/fit_circle', geometry_msgs.msg.PolygonStamped)
polygon1 = PolygonStamped()
polygon1.header.stamp = rospy.Time.now()
polygon1.header.frame_id = 'base_footprint'
polygon2 = PolygonStamped()
polygon2.header.stamp = rospy.Time.now()
polygon2.header.frame_id = 'base_footprint'
        for angle in arange(0,2*math.pi,math.pi/8.):
            pt1 = Point(xc+center_radius*math.cos(angle),yc+center_radius*math.sin(angle),zc)
            pt2 = Point(xc+R*math.cos(angle),yc+R*math.sin(angle),zc)
polygon1.polygon.points.append(pt1)
polygon2.polygon.points.append(pt2)
polygon_pub.publish(polygon1)
polygon_pub.publish(polygon2)
print 'got center (%.3f,%.3f,%.3f), radius %.3f + %.3f = %.3f' % (xc,yc,zc,center_radius,object_radius,R)
return "success"
class ExecuteGrasp(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure","missed"],input_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius","object_height"])
self.n_consecutive_grasp_failures = 0
def execute(self, userdata):
center = userdata.center
radius = userdata.radius
object_radius = userdata.object_radius
rotation_rate = userdata.rotation_rate
init_angle = userdata.init_angle
init_time = userdata.init_time
command = RotatingGrasp()
command.header.stamp = userdata.init_time
command.header.frame_id = 'base_footprint'
command.center = Point()
command.center.x = center.x
command.center.y = center.y
command.center.z = center.z
command.initial = Point()
command.initial.x = center.x + math.cos(init_angle)*radius
command.initial.y = center.y - math.sin(init_angle)*radius
command.initial.z = center.z
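        # The initial point is the object's pose at init_time, reconstructed from the
        # fitted circle (center plus radius at init_angle); the grasper service is
        # assumed to extrapolate the current pose from this pose, rotation_rate, and
        # the header timestamp.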
command.object_height = userdata.object_height+config["above_table_cutoff"]
command.object_radius = userdata.object_radius
command.rotation_rate = rotation_rate
print "radius,height",userdata.object_radius, userdata.object_height
if userdata.object_radius < .06:
print "CUP!"
object_type = "cup"
command.outward_angle = 0
elif userdata.object_radius < .12:
print "BOWL"
object_type = "bowl"
command.outward_angle = math.pi/4
else:
print "PLATE"
object_type = "plate"
command.outward_angle = math.pi/2 - math.pi/8
print 'waiting for service'
rospy.wait_for_service('rotating_grasper')
server = rospy.ServiceProxy('rotating_grasper', RotatingGrasper)
if object_type == "plate" and radius < spinning_table_radius + plate_hang_buffer:
print "plate must be pushed out"
dist_to_push = spinning_table_radius + plate_hang_buffer - radius
#TODO:
pass
try:
print 'calling...'
print command
success = True
response = server(command)
server_success = response.success
if not server_success:
print "Service call failed"
return "failure"
msg = rospy.wait_for_message('/joint_states',sensor_msgs.msg.JointState)
gripper_joint_angle = msg.position[msg.name.index("r_gripper_joint")]
grab_success = (gripper_joint_angle > .002)
if not grab_success:
print "missed!"
if self.n_consecutive_grasp_failures >= 3:
print "too many failures. exiting"
return "failure"
else:
print "failed %i times. trying again..."%self.n_consecutive_grasp_failures
self.n_consecutive_grasp_failures += 1
return "missed"
else:
self.n_consecutive_grasp_failures = 0
return "success"
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return "failure" | angle = angle + 2*math.pi | conditional_block |
spinning_table_states.py | PKG = "spinning_table_sm"
import roslib; roslib.load_manifest(PKG)
import rospy
import smach
import sensor_msgs.msg
import trajectory_msgs.msg as tm
import geometry_msgs.msg as gm
import smach_ros
from numpy import *
from numpy.linalg import norm
from collections import defaultdict
from misc_msgs.msg import *
from rotating_grasper.msg import *
from rotating_grasper.srv import *
from geometry_msgs.msg import Point,PointStamped,PolygonStamped
import numpy as np
import sys, os, yaml, subprocess
import math  # needed for math.pi / math.atan2 / math.cos used below
import rospkg
DIR = roslib.packages.get_pkg_dir(PKG, required=True) + "/config/"
stream = file(DIR+"config.yaml")
config = yaml.load(stream)
spinning_table_radius = 0.5 #TODO: measure
plate_hang_buffer = 0.03
def make_fuerte_env():
versionstr = sys.version[:3]
return dict(
ROS_MASTER_URI = os.environ["ROS_MASTER_URI"],
PATH = "/opt/ros/fuerte/bin:%s"%os.environ["PATH"],
ROS_VERSION = "fuerte",
PYTHONPATH = "/opt/ros/fuerte/lib/python%s/dist-packages"%versionstr,
ROS_PACKAGE_PATH = "/opt/ros/fuerte/share:/opt/ros/fuerte/stacks")
def make_tracker():
rp = rospkg.RosPack()
pkg_path = rp.get_path("spinning_tabletop_detection")
p = subprocess.Popen(["%s/bin/test_tracker_ros"%pkg_path
,"input_cloud:=/camera/rgb/points"
,"--min_height=%s"%config["min_filter_height"]
,"--max_height=%s"%config["max_filter_height"]
,"--above_table_cutoff=%s"%config["above_table_cutoff"]
], env = make_fuerte_env(), stdout = open('/dev/null','w'))
return p
def smaller_ang(x):
return (x + pi)%(2*pi) - pi
def closer_ang(x,a,dir=0):
"""
find angle y (==x mod 2*pi) that is close to a
dir == 0: minimize absolute value of difference
dir == 1: y > x
dir == 2: y < x
"""
if dir == 0:
return a + smaller_ang(x-a)
elif dir == 1:
return a + (x-a)%(2*pi)
elif dir == -1:
return a + (x-a)%(2*pi) - 2*pi
def closer_joint_angles(pos,seed):
print "pos",pos
print "seed",seed
result = np.array(pos)
for i in [-1,-3]:
result[i] = closer_ang(pos[i],seed[i],0)
print "result",result
return result
class TopicListener(object):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
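        # make_traj_with_limits and follow_timed_joint_traj are assumed to be
        # provided elsewhere in the package; they are not defined in this file.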
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps)
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cylinders[i][0].header.stamp.to_sec() - userdata.cylinders[0][0].header.stamp.to_sec()
A = hstack([x, y, mat(ones(x.shape))])
b = -(power(x,2)+power(y,2))
a = asarray(linalg.lstsq(A,b)[0])
xc = -.5 * a[0];
yc = -.5 * a[1];
zc = mean(z)
center_radius = sqrt((a[0]**2+a[1]**2)/4-a[2])
object_radius = mean(r)
object_height = mean(h)
R = center_radius + object_radius
middle_ind = round(len(userdata.cylinders)/2.)
print "len %d middle ind %d" % (len(userdata.cylinders),middle_ind)
middle_angle = math.atan2(y[middle_ind,0]-yc,x[middle_ind,0]-xc)
angles = mat(ones((len(userdata.cylinders),1)))
print x.shape, y.shape, len(userdata.cylinders)
for i in range(min([len(userdata.cylinders), len(x), len(y)])):
angles[i,0] = fix_angle(math.atan2(y[i,0]-yc,x[i,0]-xc),middle_angle)
# prev_angle = angles[0,0]
# for i in range(len(userdata.cylinders)):
# while angles[i,0] < prev_angle:
# angles[i,0] = angles[i,0] + 2*math.pi
# prev_angle = angles[i,0]
A_angles = hstack([times_mat,mat(ones(angles.shape))])
#print hstack([A_angles,angles])
w_result = asarray(linalg.lstsq(A_angles,angles)[0])
w = -w_result[0]
print 'rotation rate: %.3f rad/s - one revolution in %.2f sec' % (w,2*math.pi/w)
#w = 2 * math.pi / 30.
userdata.center = Point(xc,yc,zc)
userdata.radius = R
userdata.object_radius = object_radius
userdata.object_height = object_height
userdata.rotation_rate = w
userdata.init_angle = math.atan2(y[0,0]-yc,x[0,0]-xc)
userdata.init_time = times[0]
polygon_pub = rospy.Publisher('/fit_circle', geometry_msgs.msg.PolygonStamped)
polygon1 = PolygonStamped()
polygon1.header.stamp = rospy.Time.now()
polygon1.header.frame_id = 'base_footprint'
polygon2 = PolygonStamped()
polygon2.header.stamp = rospy.Time.now()
polygon2.header.frame_id = 'base_footprint'
        for angle in arange(0,2*math.pi,math.pi/8.):
            pt1 = Point(xc+center_radius*math.cos(angle),yc+center_radius*math.sin(angle),zc)
            pt2 = Point(xc+R*math.cos(angle),yc+R*math.sin(angle),zc)
polygon1.polygon.points.append(pt1)
polygon2.polygon.points.append(pt2)
polygon_pub.publish(polygon1)
polygon_pub.publish(polygon2)
print 'got center (%.3f,%.3f,%.3f), radius %.3f + %.3f = %.3f' % (xc,yc,zc,center_radius,object_radius,R)
return "success"
class | (smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure","missed"],input_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius","object_height"])
self.n_consecutive_grasp_failures = 0
def execute(self, userdata):
center = userdata.center
radius = userdata.radius
object_radius = userdata.object_radius
rotation_rate = userdata.rotation_rate
init_angle = userdata.init_angle
init_time = userdata.init_time
command = RotatingGrasp()
command.header.stamp = userdata.init_time
command.header.frame_id = 'base_footprint'
command.center = Point()
command.center.x = center.x
command.center.y = center.y
command.center.z = center.z
command.initial = Point()
command.initial.x = center.x + math.cos(init_angle)*radius
command.initial.y = center.y - math.sin(init_angle)*radius
command.initial.z = center.z
command.object_height = userdata.object_height+config["above_table_cutoff"]
command.object_radius = userdata.object_radius
command.rotation_rate = rotation_rate
print "radius,height",userdata.object_radius, userdata.object_height
if userdata.object_radius < .06:
print "CUP!"
object_type = "cup"
command.outward_angle = 0
elif userdata.object_radius < .12:
print "BOWL"
object_type = "bowl"
command.outward_angle = math.pi/4
else:
print "PLATE"
object_type = "plate"
command.outward_angle = math.pi/2 - math.pi/8
print 'waiting for service'
rospy.wait_for_service('rotating_grasper')
server = rospy.ServiceProxy('rotating_grasper', RotatingGrasper)
if object_type == "plate" and radius < spinning_table_radius + plate_hang_buffer:
print "plate must be pushed out"
dist_to_push = spinning_table_radius + plate_hang_buffer - radius
#TODO:
pass
try:
print 'calling...'
print command
success = True
response = server(command)
server_success = response.success
if not server_success:
print "Service call failed"
return "failure"
msg = rospy.wait_for_message('/joint_states',sensor_msgs.msg.JointState)
gripper_joint_angle = msg.position[msg.name.index("r_gripper_joint")]
grab_success = (gripper_joint_angle > .002)
if not grab_success:
print "missed!"
if self.n_consecutive_grasp_failures >= 3:
print "too many failures. exiting"
return "failure"
else:
print "failed %i times. trying again..."%self.n_consecutive_grasp_failures
self.n_consecutive_grasp_failures += 1
return "missed"
else:
self.n_consecutive_grasp_failures = 0
return "success"
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return "failure" | ExecuteGrasp | identifier_name |
spinning_table_states.py | PKG = "spinning_table_sm"
import roslib; roslib.load_manifest(PKG)
import rospy
import smach
import sensor_msgs.msg
import trajectory_msgs.msg as tm
import geometry_msgs.msg as gm
import smach_ros
from numpy import *
from numpy.linalg import norm
from collections import defaultdict
from misc_msgs.msg import *
from rotating_grasper.msg import *
from rotating_grasper.srv import *
from geometry_msgs.msg import Point,PointStamped,PolygonStamped
import numpy as np
import sys, os, yaml, subprocess
import math  # needed for math.pi / math.atan2 / math.cos used below
import rospkg
DIR = roslib.packages.get_pkg_dir(PKG, required=True) + "/config/"
stream = file(DIR+"config.yaml")
config = yaml.load(stream)
spinning_table_radius = 0.5 #TODO: measure
plate_hang_buffer = 0.03
def make_fuerte_env():
versionstr = sys.version[:3]
return dict(
ROS_MASTER_URI = os.environ["ROS_MASTER_URI"],
PATH = "/opt/ros/fuerte/bin:%s"%os.environ["PATH"],
ROS_VERSION = "fuerte",
PYTHONPATH = "/opt/ros/fuerte/lib/python%s/dist-packages"%versionstr,
ROS_PACKAGE_PATH = "/opt/ros/fuerte/share:/opt/ros/fuerte/stacks")
def make_tracker():
rp = rospkg.RosPack()
pkg_path = rp.get_path("spinning_tabletop_detection")
p = subprocess.Popen(["%s/bin/test_tracker_ros"%pkg_path
,"input_cloud:=/camera/rgb/points"
,"--min_height=%s"%config["min_filter_height"]
,"--max_height=%s"%config["max_filter_height"]
,"--above_table_cutoff=%s"%config["above_table_cutoff"]
], env = make_fuerte_env(), stdout = open('/dev/null','w'))
return p
def smaller_ang(x):
return (x + pi)%(2*pi) - pi
def closer_ang(x,a,dir=0):
"""
find angle y (==x mod 2*pi) that is close to a
dir == 0: minimize absolute value of difference
dir == 1: y > x
dir == -1: y < x
"""
if dir == 0:
return a + smaller_ang(x-a)
elif dir == 1:
return a + (x-a)%(2*pi)
elif dir == -1:
return a + (x-a)%(2*pi) - 2*pi
def closer_joint_angles(pos,seed):
print "pos",pos
print "seed",seed
result = np.array(pos)
for i in [-1,-3]:
result[i] = closer_ang(pos[i],seed[i],0)
print "result",result
return result
class TopicListener(object):
"stores last message"
last_msg = None
def __init__(self,topic_name,msg_type):
self.sub = rospy.Subscriber(topic_name,msg_type,self.callback)
rospy.loginfo('waiting for the first message: %s'%topic_name)
while self.last_msg is None: rospy.sleep(.01)
rospy.loginfo('ok: %s'%topic_name)
def callback(self,msg):
self.last_msg = msg
class TrajectoryControllerWrapper(object):
def __init__(self, controller_name, listener):
self.listener = listener
self.joint_names = rospy.get_param("/%s/joints"%controller_name)
self.n_joints = len(self.joint_names)
msg = self.listener.last_msg
self.ros_joint_inds = [msg.name.index(name) for name in self.joint_names]
self.controller_pub = rospy.Publisher("%s/command"%controller_name, tm.JointTrajectory)
def get_joint_positions(self):
msg = self.listener.last_msg
return [msg.position[i] for i in self.ros_joint_inds]
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
assert len(positions_goal) == len(positions_cur)
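# scale the move duration so the fastest joint stays within its velocity limit (inf-norm below)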
duration = norm((r_[positions_goal] - r_[positions_cur])/self.vel_limits, ord=inf)
jt = tm.JointTrajectory()
jt.joint_names = self.joint_names
jt.header.stamp = rospy.Time.now()
jtp = tm.JointTrajectoryPoint()
jtp.positions = positions_goal
jtp.velocities = zeros(len(positions_goal))
jtp.time_from_start = rospy.Duration(duration)
jt.points = [jtp]
self.controller_pub.publish(jt)
rospy.loginfo("sleeping %.2f sec"%duration)
rospy.sleep(duration)
def follow_joint_traj(self, positions, duration = None):
positions = np.r_[np.atleast_2d(self.get_joint_positions()), positions]
positions[:,4] = np.unwrap(positions[:,4])
positions[:,6] = np.unwrap(positions[:,6])
positions, velocities, times = make_traj_with_limits(positions, self.vel_limits, self.acc_limits,smooth=True)
self.follow_timed_joint_traj(positions, velocities, times)
def mirror_arm_joints(x):
"mirror image of joints (r->l or l->r)"
return r_[-x[0],x[1],-x[2],x[3],-x[4],x[5],-x[6]]
class Arm(TrajectoryControllerWrapper):
|
def fix_angle(angle, center_point=math.pi):
while angle > center_point + math.pi:
angle = angle - 2*math.pi
while angle < center_point - math.pi:
angle = angle + 2*math.pi
return angle
class Head(TrajectoryControllerWrapper):
def __init__(self, listener):
TrajectoryControllerWrapper.__init__(self,"head_traj_controller",listener)
self.vel_limits = [1.,1.]
def set_pan_tilt(self, pan, tilt):
self.goto_joint_positions([pan, tilt])
####################################
class MoveArmToSide(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"])
listener = TopicListener("joint_states",sensor_msgs.msg.JointState)
self.rarm = Arm("r",listener)
self.larm = Arm("l",listener)
self.head = Head(listener)
def execute(self, userdata):
rospy.sleep(1)
self.larm.goto_posture('up')
self.rarm.goto_posture('up')
self.head.set_pan_tilt(0,.7)
return "success"
class GatherDetections(smach.State):
tracker = None
def __init__(self,detector=None):
smach.State.__init__(self, outcomes=["success", "failure"],output_keys=["cylinders"])
self.detector = detector
self.cylinders = defaultdict(list) #list of (center, radius)
self.num_detections = 0
def kill_tracker(self, *args):
self.tracker.terminate()
def execute(self, userdata):
#TODO: start detector
if self.tracker is None or self.tracker.poll() is not None:
self.tracker = make_tracker()
self.done = False
print 'subscribing'
rospy.Subscriber('/spinning_tabletop/cylinders',TrackedCylinders,self.handle_detection)
sleep_time = 10
print 'waiting for %d seconds' % sleep_time
rospy.sleep(sleep_time)
self.done = True
maxLength = -1
maxKey = -1
for key in self.cylinders.keys():
if len(self.cylinders[key]) > maxLength:
maxLength = len(self.cylinders[key])
maxKey = key
if maxKey == -1:
print 'no detections!'
return "failure"
if maxLength < 4:
return "failure"
print 'chose id %s with length %d' % (maxKey,maxLength)
userdata.cylinders = self.cylinders[maxKey]
return "success"
def handle_detection(self,detection):
if self.done: return
for i in range(len(detection.ids)):
pt = PointStamped()
pt.header = detection.header
pt.point.x = detection.xs[i]
pt.point.y = detection.ys[i]
pt.point.z = detection.zs[i] + detection.hs[i]
self.cylinders[detection.ids[i]].append((pt,detection.rs[i], detection.hs[i]))
self.num_detections += 1
class FitCircle(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure"],input_keys=["cylinders"],output_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius", "object_height"])
def execute(self, userdata):
print 'fitting circle'
times = []
times_mat = mat(ones((len(userdata.cylinders),1)))
x = mat(ones((len(userdata.cylinders),1)))
y = mat(ones((len(userdata.cylinders),1)))
z = ones(len(userdata.cylinders))
r = ones(len(userdata.cylinders))
h = zeros(len(userdata.cylinders))
for i in range(len(userdata.cylinders)):
x[i,0] = userdata.cylinders[i][0].point.x
y[i,0] = userdata.cylinders[i][0].point.y
z[i] = userdata.cylinders[i][0].point.z
r[i] = userdata.cylinders[i][1]
h[i] = userdata.cylinders[i][2]
times.append(userdata.cylinders[i][0].header.stamp)
times_mat[i,0] = userdata.cylinders[i][0].header.stamp.to_sec() - userdata.cylinders[0][0].header.stamp.to_sec()
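# Sketch of the least-squares circle fit below: a circle can be written as
# x^2 + y^2 + a0*x + a1*y + a2 = 0, which is linear in (a0, a1, a2), so we solve
# A*[a0, a1, a2]^T = -(x^2 + y^2); the center is then (-a0/2, -a1/2) and the
# circle radius is sqrt((a0^2 + a1^2)/4 - a2).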
A = hstack([x, y, mat(ones(x.shape))])
b = -(power(x,2)+power(y,2))
a = asarray(linalg.lstsq(A,b)[0])
xc = -.5 * a[0];
yc = -.5 * a[1];
zc = mean(z)
center_radius = sqrt((a[0]**2+a[1]**2)/4-a[2])
object_radius = mean(r)
object_height = mean(h)
R = center_radius + object_radius
middle_ind = int(round(len(userdata.cylinders)/2.))
print "len %d middle ind %d" % (len(userdata.cylinders),middle_ind)
middle_angle = math.atan2(y[middle_ind,0]-yc,x[middle_ind,0]-xc)
angles = mat(ones((len(userdata.cylinders),1)))
print x.shape, y.shape, len(userdata.cylinders)
for i in range(min([len(userdata.cylinders), len(x), len(y)])):
angles[i,0] = fix_angle(math.atan2(y[i,0]-yc,x[i,0]-xc),middle_angle)
# prev_angle = angles[0,0]
# for i in range(len(userdata.cylinders)):
# while angles[i,0] < prev_angle:
# angles[i,0] = angles[i,0] + 2*math.pi
# prev_angle = angles[i,0]
A_angles = hstack([times_mat,mat(ones(angles.shape))])
#print hstack([A_angles,angles])
w_result = asarray(linalg.lstsq(A_angles,angles)[0])
w = -w_result[0]
print 'rotation rate: %.3f rad/s - one revolution in %.2f sec' % (w,2*math.pi/w)
#w = 2 * math.pi / 30.
userdata.center = Point(xc,yc,zc)
userdata.radius = R
userdata.object_radius = object_radius
userdata.object_height = object_height
userdata.rotation_rate = w
userdata.init_angle = math.atan2(y[0,0]-yc,x[0,0]-xc)
userdata.init_time = times[0]
polygon_pub = rospy.Publisher('/fit_circle', PolygonStamped)
polygon1 = PolygonStamped()
polygon1.header.stamp = rospy.Time.now()
polygon1.header.frame_id = 'base_footprint'
polygon2 = PolygonStamped()
polygon2.header.stamp = rospy.Time.now()
polygon2.header.frame_id = 'base_footprint'
for angle in arange(0,2*math.pi,math.pi/8.):
pt1 = Point(xc+center_radius*math.cos(angle),yc+center_radius*math.sin(angle),zc)
pt2 = Point(xc+R*math.cos(angle),yc+R*math.sin(angle),zc)
polygon1.polygon.points.append(pt1)
polygon2.polygon.points.append(pt2)
polygon_pub.publish(polygon1)
polygon_pub.publish(polygon2)
print 'got center (%.3f,%.3f,%.3f), radius %.3f + %.3f = %.3f' % (xc,yc,zc,center_radius,object_radius,R)
return "success"
class ExecuteGrasp(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["success", "failure","missed"],input_keys=["center","radius","rotation_rate","init_angle","init_time","object_radius","object_height"])
self.n_consecutive_grasp_failures = 0
def execute(self, userdata):
center = userdata.center
radius = userdata.radius
object_radius = userdata.object_radius
rotation_rate = userdata.rotation_rate
init_angle = userdata.init_angle
init_time = userdata.init_time
command = RotatingGrasp()
command.header.stamp = userdata.init_time
command.header.frame_id = 'base_footprint'
command.center = Point()
command.center.x = center.x
command.center.y = center.y
command.center.z = center.z
command.initial = Point()
command.initial.x = center.x + math.cos(init_angle)*radius
command.initial.y = center.y - math.sin(init_angle)*radius
command.initial.z = center.z
command.object_height = userdata.object_height+config["above_table_cutoff"]
command.object_radius = userdata.object_radius
command.rotation_rate = rotation_rate
print "radius,height",userdata.object_radius, userdata.object_height
if userdata.object_radius < .06:
print "CUP!"
object_type = "cup"
command.outward_angle = 0
elif userdata.object_radius < .12:
print "BOWL"
object_type = "bowl"
command.outward_angle = math.pi/4
else:
print "PLATE"
object_type = "plate"
command.outward_angle = math.pi/2 - math.pi/8
print 'waiting for service'
rospy.wait_for_service('rotating_grasper')
server = rospy.ServiceProxy('rotating_grasper', RotatingGrasper)
if object_type == "plate" and radius < spinning_table_radius + plate_hang_buffer:
print "plate must be pushed out"
dist_to_push = spinning_table_radius + plate_hang_buffer - radius
#TODO:
pass
try:
print 'calling...'
print command
success = True
response = server(command)
server_success = response.success
if not server_success:
print "Service call failed"
return "failure"
msg = rospy.wait_for_message('/joint_states',sensor_msgs.msg.JointState)
gripper_joint_angle = msg.position[msg.name.index("r_gripper_joint")]
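# assumption: a residual gripper opening (> ~2 mm) means the fingers closed on an object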
grab_success = (gripper_joint_angle > .002)
if not grab_success:
print "missed!"
if self.n_consecutive_grasp_failures >= 3:
print "too many failures. exiting"
return "failure"
else:
print "failed %i times. trying again..."%self.n_consecutive_grasp_failures
self.n_consecutive_grasp_failures += 1
return "missed"
else:
self.n_consecutive_grasp_failures = 0
return "success"
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return "failure" | L_POSTURES = dict(
untucked = [0.4, 1.0, 0.0, -2.05, 0.0, -0.1, 0.0],
tucked = [0.06, 1.25, 1.79, -1.68, -1.73, -0.10, -0.09],
up = [ 0.33, -0.35, 2.59, -0.15, 0.59, -1.41, -0.27],
side = [ 1.832, -0.332, 1.011, -1.437, 1.1 , -2.106, 3.074]
)
def __init__(self, lr,listener):
TrajectoryControllerWrapper.__init__(self,"%s_arm_controller"%lr, listener)
self.lr = lr
self.lrlong = {"r":"right", "l":"left"}[lr]
self.tool_frame = "%s_gripper_tool_frame"%lr
self.cart_command = rospy.Publisher('%s_cart/command_pose'%lr, gm.PoseStamped)
self.vel_limits = [0.42, 0.42,0.65,0.66,0.72, 0.62,0.72]
def goto_posture(self, name):
l_joints = self.L_POSTURES[name]
joints = l_joints if self.lr == 'l' else mirror_arm_joints(l_joints)
self.goto_joint_positions(joints)
def goto_joint_positions(self, positions_goal):
positions_cur = self.get_joint_positions()
positions_goal = closer_joint_angles(positions_goal, positions_cur)
TrajectoryControllerWrapper.goto_joint_positions(self, positions_goal)
def set_cart_target(self, quat, xyz, ref_frame):
ps = gm.PoseStamped()
ps.header.frame_id = ref_frame
ps.header.stamp = rospy.Time(0)
ps.pose.position = gm.Point(*xyz);
ps.pose.orientation = gm.Quaternion(*quat)
self.cart_command.publish(ps) | identifier_body |
action.py | import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> bs4.element.Tag:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
) | f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions | elif field == "sources":
ret = []
for source in value:
tag = ( | random_line_split |
action.py | import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> bs4.element.Tag:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used to for the four primary
usecases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
|
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
| return self.actions == other.actions | conditional_block |
action.py | import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def | (self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> bs4.element.Tag:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
| listify | identifier_name |
action.py | import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions for dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc != ""
), f"'{source}' is invalid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
|
def to_md(self, field: str, td: bs4.element.Tag) -> bs4.element.Tag:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
| """ Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value | identifier_body |
web.rs | //! This is the initial MVP of the events service to get the BDD tests to work
use db;
use models::user::IOModel;
use models::user::pg::PgModel as UserModel;
use rouille;
use rouille::input::post;
use rouille::{Request, Response};
use services::user;
use services::user::Service as UserService;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::io;
use std::iter::FromIterator;
use std::str::FromStr;
use uuid::Uuid;
//
// Runs a web server that passes the BDD tests
//
pub fn run() {
eprintln!("Listening on 0.0.0.0:8080");
rouille::start_server("0.0.0.0:8080", |request| {
rouille::log(request, io::stderr(), || {
let conn = &db::connection();
let user_model = &UserModel::new(conn);
let user_service = &UserService::new(user_model, b"....");
router!(request,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct Status<'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
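///
/// An illustrative request body (values are made up):
/// `{"name": "alice", "password": "hunter2", "email": "alice@example.com"}`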
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
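///
/// e.g. (illustrative): `GET /oauth/register/confirm?confirm_token=<confirm_token>`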
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants against
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
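/// Illustrative form bodies (field names follow the RFCs above; values are made up):
///
/// `grant_type=password&username=alice&password=hunter2`
///
/// `grant_type=refresh_token&refresh_token=<refresh_token>`
///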
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires a `Authorization: Bearer {access_token}` header to make the request
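///
/// e.g. (illustrative): `curl -H "Authorization: Bearer <access_token>" http://localhost:8080/oauth/me`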
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is a enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> |
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password".into()),
]).unwrap(),
user::PasswordGrantRequest {
username: "test-user".into(),
password: "test-password".into(),
}
);
assert_eq!(
form_to_password_grant(&vec![]).unwrap_err(),
WebError::MissingUsername
);
assert_eq!(
form_to_password_grant(&vec![("username".into(), "test-user".into())]).unwrap_err(),
WebError::MissingPassword
);
assert_eq!(
form_to_password_grant(&vec![("password".into(), "test-pass".into())]).unwrap_err(),
WebError::MissingUsername
);
}
/// Converts the Form Fields into a `RefreshGrantRequest`
fn form_to_refresh_grant(fields: &Fields) -> Result<user::RefreshGrantRequest, WebError> {
let fields = form_to_map(fields);
let token = fields
.get("refresh_token")
.ok_or(WebError::MissingRefreshToken)?;
Ok(user::RefreshGrantRequest {
refresh_token: token,
})
}
#[test]
fn test_form_to_refresh_grant() {
assert_eq!(
form_to_refresh_grant(&vec![
("grant_type".into(), "refesh_token".into()),
("refresh_token".into(), "12345".into()),
]).unwrap(),
user::RefreshGrantRequest {
refresh_token: "12345".into(),
}
);
assert_eq!(
form_to_refresh_grant(&vec![]).unwrap_err(),
WebError::MissingRefreshToken
);
}
| {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
} | identifier_body |
web.rs | //! This is the initial MVP of the events service to get the BDD tests to work
use db;
use models::user::IOModel;
use models::user::pg::PgModel as UserModel;
use rouille;
use rouille::input::post;
use rouille::{Request, Response};
use services::user;
use services::user::Service as UserService;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::io;
use std::iter::FromIterator;
use std::str::FromStr;
use uuid::Uuid;
//
// Runs a web server that passes the BDD tests
//
pub fn run() {
eprintln!("Listening on 0.0.0.0:8080");
rouille::start_server("0.0.0.0:8080", |request| {
rouille::log(request, io::stderr(), || {
let conn = &db::connection();
let user_model = &UserModel::new(conn);
let user_service = &UserService::new(user_model, b"....");
router!(request,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct | <'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants against
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires an `Authorization: Bearer {access_token}` header to make the request
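///
/// For example (hypothetical token value, assuming the server started by `run()` is
/// listening on port 8080 as configured above):
///
/// ```text
/// curl -H "Authorization: Bearer <access_token>" http://localhost:8080/oauth/me
/// ```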
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
}
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password".into()),
]).unwrap(),
user::PasswordGrantRequest {
username: "test-user".into(),
password: "test-password".into(),
}
);
assert_eq!(
form_to_password_grant(&vec![]).unwrap_err(),
WebError::MissingUsername
);
assert_eq!(
form_to_password_grant(&vec![("username".into(), "test-user".into())]).unwrap_err(),
WebError::MissingPassword
);
assert_eq!(
form_to_password_grant(&vec![("password".into(), "test-pass".into())]).unwrap_err(),
WebError::MissingUsername
);
}
/// Converts the Form Fields into a `RefreshGrantRequest`
fn form_to_refresh_grant(fields: &Fields) -> Result<user::RefreshGrantRequest, WebError> {
let fields = form_to_map(fields);
let token = fields
.get("refresh_token")
.ok_or(WebError::MissingRefreshToken)?;
Ok(user::RefreshGrantRequest {
refresh_token: token,
})
}
#[test]
fn test_form_to_refresh_grant() {
assert_eq!(
form_to_refresh_grant(&vec![
("grant_type".into(), "refresh_token".into()),
("refresh_token".into(), "12345".into()),
]).unwrap(),
user::RefreshGrantRequest {
refresh_token: "12345".into(),
}
);
assert_eq!(
form_to_refresh_grant(&vec![]).unwrap_err(),
WebError::MissingRefreshToken
);
}
| Status | identifier_name |
web.rs | //! This is the initial MVP of the events service to get the BDD tests to work
use db;
use models::user::IOModel;
use models::user::pg::PgModel as UserModel;
use rouille;
use rouille::input::post;
use rouille::{Request, Response};
use services::user;
use services::user::Service as UserService;
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::io;
use std::iter::FromIterator;
use std::str::FromStr;
use uuid::Uuid;
//
// Runs a web server that passes the BDD tests
//
pub fn run() {
eprintln!("Listening on 0.0.0.0:8080");
rouille::start_server("0.0.0.0:8080", |request| {
rouille::log(request, io::stderr(), || {
let conn = &db::connection();
let user_model = &UserModel::new(conn);
let user_service = &UserService::new(user_model, b"....");
router!(request,
(GET) (/status) => { status(user_model) },
(POST) (/oauth/register) => { oauth_register(user_service, request) },
(GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) },
(POST) (/oauth/token) => { oauth_token(user_service, request) },
(GET) (/oauth/me) => { me(user_service, request) },
_ => Response::empty_404()
)
})
})
}
//
// Handlers
//
#[derive(Serialize, Debug)]
struct Status<'a> {
pub status: &'a str,
}
/// this is the status endpoint
fn status(user_model: &UserModel) -> Response {
let status = user_model
.find(&Uuid::new_v4())
.map(|_| Status { status: "up" })
.unwrap_or_else(|_| Status { status: "down" });
Response::json(&status)
}
#[derive(Deserialize)]
struct RegisterForm {
name: String,
password: String,
email: String,
}
/// this is the user registration endpoint
///
/// This accepts a json POST of [`RegisterForm`]
fn oauth_register(user_service: &UserService, request: &Request) -> Response {
let data: RegisterForm = try_or_400!(rouille::input::json_input(request));
let req = user::RegisterRequest {
name: &data.name,
password: &data.password,
email: &data.email,
};
user_service
.register(&req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the user confirmation endpoint
///
/// This is a GET request for a query string of `?confirm_token`
fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response {
let confirm_token: String = try_or_400!(
request
.get_param("confirm_token")
.ok_or(WebError::MissingConfirmToken)
);
let req = &user::ConfirmNewUserRequest {
confirm_token: &confirm_token,
};
user_service
.confirm_new_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
/// this is the oauth token endpoint for making password or refresh grants against
///
/// This follows the protocol set up by the following specs
///
/// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2)
/// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6)
///
fn oauth_token(user_service: &UserService, request: &Request) -> Response {
let form = &try_or_400!(post::raw_urlencoded_post_input(request));
let grant_type = try_or_400!(find_grant_type(form));
match grant_type {
GrantType::Password => {
let req = &try_or_400!(form_to_password_grant(form));
user_service
.password_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
GrantType::Refresh => {
let req = &try_or_400!(form_to_refresh_grant(form));
user_service
.refresh_token_grant(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
}
}
/// The current user handler
///
/// This requires an `Authorization: Bearer {access_token}` header to make the request
fn me(user_service: &UserService, request: &Request) -> Response {
let access_token = request.header("Authorization")
.and_then(move |x| x.get(7..)) // Get everything after "Bearer "
.unwrap_or("");
let req = &user::CurrentUserRequest { access_token };
user_service
.current_user(req)
.map(Response::from)
.unwrap_or_else(Response::from)
}
// Converters
//
impl From<user::CurrentUserResponse> for Response {
fn from(result: user::CurrentUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::AccessTokenResponse> for Response {
fn from(result: user::AccessTokenResponse) -> Self {
Response::json(&result)
}
}
impl From<user::ConfirmNewUserResponse> for Response {
fn from(result: user::ConfirmNewUserResponse) -> Self {
Response::json(&result)
}
}
impl From<user::RegisterResponse> for Response {
fn from(result: user::RegisterResponse) -> Self {
Response::json(&result)
}
}
///
/// This is a private Error type for things that can go wrong
///
#[derive(Debug, PartialEq)]
enum WebError {
MissingConfirmToken,
MissingPassword,
MissingUsername,
MissingRefreshToken,
InvalidGrantType,
}
impl fmt::Display for WebError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl Error for WebError {
fn description(&self) -> &str {
use self::WebError::*;
match *self {
MissingUsername => "missing username",
MissingPassword => "missing password",
MissingRefreshToken => "missing refresh_token",
MissingConfirmToken => "missing confirm token",
InvalidGrantType => "invalid grant type",
}
}
}
impl From<user::ServiceError> for Response {
fn from(err: user::ServiceError) -> Self {
use services::user::ServiceError::*;
match err {
InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400),
PermissionDenied => Response::text("").with_status_code(403),
UserExists => Response::text("UserExists").with_status_code(403),
DBError(_) => Response::text("").with_status_code(500),
}
}
}
///
/// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"`
///
/// Note: We may want to move this to the service module
#[derive(Debug, PartialEq)]
enum GrantType {
Password,
Refresh,
}
impl FromStr for GrantType {
type Err = WebError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"password" => Ok(GrantType::Password),
"refresh_token" => Ok(GrantType::Refresh),
_ => Err(WebError::InvalidGrantType),
}
}
}
#[test]
fn test_grant_type_from_str() {
assert_eq!(
GrantType::from_str("password").unwrap(),
GrantType::Password
)
}
///
/// # Helpers
///
///
/// Finds the `grant_type` in the Vector of form fields
///
type Fields = [(String, String)];
fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> {
for &(ref k, ref v) in fields.iter() {
if k == "grant_type" {
return GrantType::from_str(v);
}
}
Err(WebError::InvalidGrantType)
}
#[test]
fn test_find_grant_type() {
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "password".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Password
);
assert_eq!(
find_grant_type(&vec![
("x".into(), "y".into()),
("grant_type".into(), "refresh_token".into()),
("a".into(), "b".into()),
]).unwrap(),
GrantType::Refresh
);
assert_eq!(
find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(),
WebError::InvalidGrantType
);
}
fn form_to_map(fields: &Fields) -> HashMap<&str, &str> {
HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| {
let k: &str = k;
let v: &str = v;
(k, v)
}))
}
///
/// Converts the Form Fields to a `PasswordGrantRequest`
///
fn form_to_password_grant(
fields: &[(String, String)],
) -> Result<user::PasswordGrantRequest, WebError> {
let fields = form_to_map(fields);
let username = fields.get("username").ok_or(WebError::MissingUsername)?;
let password = fields.get("password").ok_or(WebError::MissingPassword)?;
Ok(user::PasswordGrantRequest { username, password })
}
#[test]
fn test_form_to_password_grant() {
assert_eq!(
form_to_password_grant(&vec![
("grant_type".into(), "password".into()),
("username".into(), "test-user".into()),
("password".into(), "test-password".into()),
]).unwrap(),
user::PasswordGrantRequest {
username: "test-user".into(),
password: "test-password".into(),
}
);
assert_eq!(
form_to_password_grant(&vec![]).unwrap_err(),
WebError::MissingUsername
);
assert_eq!(
form_to_password_grant(&vec![("username".into(), "test-user".into())]).unwrap_err(), | );
assert_eq!(
form_to_password_grant(&vec![("password".into(), "test-pass".into())]).unwrap_err(),
WebError::MissingUsername
);
}
/// Converts the Form Fields into a `RefreshGrantRequest`
fn form_to_refresh_grant(fields: &Fields) -> Result<user::RefreshGrantRequest, WebError> {
let fields = form_to_map(fields);
let token = fields
.get("refresh_token")
.ok_or(WebError::MissingRefreshToken)?;
Ok(user::RefreshGrantRequest {
refresh_token: token,
})
}
#[test]
fn test_form_to_refresh_grant() {
assert_eq!(
form_to_refresh_grant(&vec![
("grant_type".into(), "refresh_token".into()),
("refresh_token".into(), "12345".into()),
]).unwrap(),
user::RefreshGrantRequest {
refresh_token: "12345".into(),
}
);
assert_eq!(
form_to_refresh_grant(&vec![]).unwrap_err(),
WebError::MissingRefreshToken
);
} | WebError::MissingPassword | random_line_split |
chmod.rs | //
// Copyright (c) 2018, The MesaLock Linux Project Contributors
// All rights reserved.
//
// This work is licensed under the terms of the BSD 3-Clause License.
// For a copy, see the LICENSE file.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright (c) 2013-2018, Jordi Boggiano
// Copyright (c) 2013-2018, Alex Lyon
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};
use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;
const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn stdout(data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1) | .validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
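// Illustrative MODE values accepted by `validate_mode` below (not an exhaustive list):
// a numeric mode such as "644", a symbolic mode such as "u+x", or a comma-separated
// combination such as "u+rwx,go-r".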
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not stat '{}': {}",
file.display(),
err
)));
}
return 1;
}
};
match options.fmode {
Some(mode) => change_file(options, msgs, fperm, mode, file),
None => {
let cmode_unwrapped = options.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
change_file(options, msgs, fperm, mode, file);
fperm = mode;
}
Err(f) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!("failed to parse mode: {}", f)));
}
return 1;
}
}
}
0
}
}
}
#[cfg(unix)]
fn change_file(
options: &Options,
msgs: &mut [Option<Message>; 2],
fperm: u32,
mode: u32,
file: &Path,
) -> i32 {
if fperm == mode {
if options.verbosity == Verbosity::Verbose {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' retained as {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm)
)));
}
return 0;
}
let mut exitcode = 0;
let res = fs::set_permissions(file, fs::Permissions::from_mode(mode));
if let Err(err) = res {
let mut count = 0;
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not set permissions: {}",
err
)));
count += 1;
}
if options.verbosity == Verbosity::Verbose {
msgs[count] = Some(Message::stdout(format!(
"failed to change mode of file '{}' from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
exitcode = 1;
} else {
if options.verbosity == Verbosity::Verbose || options.verbosity == Verbosity::Changes {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' changed from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
}
exitcode
} | random_line_split |
|
chmod.rs | //
// Copyright (c) 2018, The MesaLock Linux Project Contributors
// All rights reserved.
//
// This work is licensed under the terms of the BSD 3-Clause License.
// For a copy, see the LICENSE file.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright (c) 2013-2018, Jordi Boggiano
// Copyright (c) 2013-2018, Alex Lyon
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};
use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;
const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn | (data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1)
.validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not stat '{}': {}",
file.display(),
err
)));
}
return 1;
}
};
match options.fmode {
Some(mode) => change_file(options, msgs, fperm, mode, file),
None => {
let cmode_unwrapped = options.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
change_file(options, msgs, fperm, mode, file);
fperm = mode;
}
Err(f) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!("failed to parse mode: {}", f)));
}
return 1;
}
}
}
0
}
}
}
#[cfg(unix)]
fn change_file(
options: &Options,
msgs: &mut [Option<Message>; 2],
fperm: u32,
mode: u32,
file: &Path,
) -> i32 {
if fperm == mode {
if options.verbosity == Verbosity::Verbose {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' retained as {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm)
)));
}
return 0;
}
let mut exitcode = 0;
let res = fs::set_permissions(file, fs::Permissions::from_mode(mode));
if let Err(err) = res {
let mut count = 0;
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not set permissions: {}",
err
)));
count += 1;
}
if options.verbosity == Verbosity::Verbose {
msgs[count] = Some(Message::stdout(format!(
"failed to change mode of file '{}' from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
exitcode = 1;
} else {
if options.verbosity == Verbosity::Verbose || options.verbosity == Verbosity::Changes {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' changed from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
}
exitcode
}
| stdout | identifier_name |
chmod.rs | //
// Copyright (c) 2018, The MesaLock Linux Project Contributors
// All rights reserved.
//
// This work is licensed under the terms of the BSD 3-Clause License.
// For a copy, see the LICENSE file.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright (c) 2013-2018, Jordi Boggiano
// Copyright (c) 2013-2018, Alex Lyon
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};
use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;
const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
#[derive(Fail, Debug)]
enum ChmodError {
#[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
Stat(String, #[cause] io::Error),
}
#[derive(PartialEq)]
enum Verbosity {
None,
Changes,
Quiet,
Verbose,
}
enum MessageKind {
Stdout,
Stderr,
}
// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
// type
struct Message {
kind: MessageKind,
data: String,
}
impl Message {
pub fn stdout(data: String) -> Self {
Self {
kind: MessageKind::Stdout,
data,
}
}
pub fn stderr(data: String) -> Self {
Self {
kind: MessageKind::Stderr,
data,
}
}
}
struct Options<'a> {
verbosity: Verbosity,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<&'a str>,
current_dir: Option<PathBuf>,
}
pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
S: UtilSetup,
T: ArgsIter,
{
let matches = {
let app = util_app!(NAME)
.after_help(MODE_SYNTAX)
.setting(AppSettings::AllowLeadingHyphen)
.arg(Arg::with_name("recursive")
.long("recursive")
.short("R")
.help("change files and directories recursively"))
.arg(Arg::with_name("reference")
.long("reference")
.takes_value(true)
.value_name("RFILE")
.help("use RFILE's mode instead of provided MODE values"))
.arg(Arg::with_name("preserve-root")
.long("preserve-root")
.help("fail to operate recursively on '/'"))
.arg(Arg::with_name("no-preserve-root")
.long("no-preserve-root")
.overrides_with("preserve-root")
.help("do not treat '/' specially (the default)"))
.arg(Arg::with_name("verbose")
.long("verbose")
.short("v")
.help("output a diagnostic for every file processed"))
.arg(Arg::with_name("quiet")
.long("quiet")
.short("f")
.visible_alias("silent")
.help("suppress most error messages"))
.arg(Arg::with_name("changes")
.long("changes")
.short("c")
.help("like verbose but report only when a change is made"))
.group(ArgGroup::with_name("verbosity")
.args(&["verbose", "quiet", "changes"]))
// FIXME: not sure how to tell clap that MODE can be missing if --reference is
// given by the user. clap is also unhappy that FILES (which has an
// index that occurs later than MODE) is required while MODE is not
.arg(Arg::with_name("MODE")
.index(1)
.validator_os(validate_mode)
.required(true))
//.conflicts_with("reference"))
.arg(Arg::with_name("FILES")
.index(2)
.required(true)
.multiple(true));
app.get_matches_from_safe(args)?
};
let verbosity = if matches.is_present("changes") {
Verbosity::Changes
} else if matches.is_present("quiet") {
Verbosity::Quiet
} else if matches.is_present("verbose") {
Verbosity::Verbose
} else {
Verbosity::None
};
let preserve_root = matches.is_present("preserve-root");
let recursive = matches.is_present("recursive");
let fmode = match matches.value_of_os("reference") {
Some(ref_file) => Some(fs::metadata(ref_file)
.map(|data| data.mode())
.map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
None => None,
};
let current_dir = setup.current_dir().map(|p| p.to_owned());
let (_, stdout, stderr) = setup.stdio();
let mut chmoder = Chmoder {
stdout: stdout.lock()?,
stderr: stderr.lock()?,
};
let options = Options {
verbosity: verbosity,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: matches.value_of("MODE"),
current_dir: current_dir,
};
let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;
if exitcode == 0 {
Ok(())
} else {
Err(MesaError {
exitcode: exitcode,
progname: None,
err: None,
})
}
}
fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
// NOTE: used to use regex to match the mode, but that caused the binary size to increase
// considerably
arg.to_str()
.ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
.and_then(|s| {
for mode in s.split(',') {
if mode::parse_numeric(0, mode).is_err()
&& mode::parse_symbolic(0, mode, false).is_err()
{
return Err("found invalid character in mode string".into());
}
}
Ok(())
})
}
struct Chmoder<O, E>
where
O: Write,
E: Write,
{
stdout: O,
stderr: E,
}
impl<'a, O, E> Chmoder<O, E>
where
O: Write,
E: Write,
{
fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
let mut r = 0;
let mut msgs = [None, None];
for filename in files {
let file = util::actual_path(&options.current_dir, filename);
r |= if file.is_dir() && options.recursive {
self.chmod_dir(options, &mut msgs, &file)
} else {
let res = chmod_file(options, &mut msgs, &file);
self.write_msgs(&mut msgs).map(|_| res)
}?;
}
Ok(r)
}
fn chmod_dir(
&mut self,
options: &Options,
msgs: &mut [Option<Message>; 2],
file: &Path,
) -> Result<i32> {
let mut r = 0;
if !options.preserve_root || file != Path::new("/") {
let walker = WalkDir::new(file).contents_first(true);
for entry in walker {
match entry {
Ok(entry) => {
r |= chmod_file(options, msgs, &entry.path());
self.write_msgs(msgs)?;
}
Err(f) => display_msg!(self.stderr, "{}", f)?,
}
}
} else {
display_msg!(
self.stderr,
"could not change permissions of directory '{}'",
file.display()
)?;
r = 1;
}
Ok(r)
}
fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
for msg in msgs {
if let Some(msg) = msg {
match msg.kind {
MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
}
}
*msg = None;
}
Ok(())
}
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
let mut fperm = match fs::metadata(file) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not stat '{}': {}",
file.display(),
err
)));
}
return 1;
}
};
match options.fmode {
Some(mode) => change_file(options, msgs, fperm, mode, file),
None => |
}
}
#[cfg(unix)]
fn change_file(
options: &Options,
msgs: &mut [Option<Message>; 2],
fperm: u32,
mode: u32,
file: &Path,
) -> i32 {
if fperm == mode {
if options.verbosity == Verbosity::Verbose {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' retained as {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm)
)));
}
return 0;
}
let mut exitcode = 0;
let res = fs::set_permissions(file, fs::Permissions::from_mode(mode));
if let Err(err) = res {
let mut count = 0;
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!(
"could not set permissions: {}",
err
)));
count += 1;
}
if options.verbosity == Verbosity::Verbose {
msgs[count] = Some(Message::stdout(format!(
"failed to change mode of file '{}' from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
exitcode = 1;
} else {
if options.verbosity == Verbosity::Verbose || options.verbosity == Verbosity::Changes {
msgs[0] = Some(Message::stdout(format!(
"mode of '{}' changed from {:o} ({}) to {:o} ({})",
file.display(),
fperm,
display_permissions_unix(fperm),
mode,
display_permissions_unix(mode)
)));
}
}
exitcode
}
| {
let cmode_unwrapped = options.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
change_file(options, msgs, fperm, mode, file);
fperm = mode;
}
Err(f) => {
if options.verbosity != Verbosity::Quiet {
msgs[0] = Some(Message::stderr(format!("failed to parse mode: {}", f)));
}
return 1;
}
}
}
0
} | conditional_block |
cnn.py | """
Finetuning Torchvision Models
=============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# In this tutorial we will take a deeper look at how to finetune and
# feature extract the `torchvision
# models <https://pytorch.org/docs/stable/torchvision/models.html>`__, all
# of which have been pretrained on the 1000-class Imagenet dataset. This
# tutorial will give an in-depth look at how to work with several modern
# CNN architectures, and will build an intuition for finetuning any
# PyTorch model. Since each model architecture is different, there is no
# boilerplate finetuning code that will work in all scenarios. Rather, the
# researcher must look at the existing architecture and make custom
# adjustments for each model.
#
# In this document we will perform two types of transfer learning:
# finetuning and feature extraction. In **finetuning**, we start with a
# pretrained model and update *all* of the model’s parameters for our new
# task, in essence retraining the whole model. In **feature extraction**,
# we start with a pretrained model and only update the final layer weights
# from which we derive predictions. It is called feature extraction
# because we use the pretrained CNN as a fixed feature-extractor, and only
# change the output layer. For more technical information about transfer
# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
# `here <https://ruder.io/transfer-learning/>`__.
#
# In general both transfer learning methods follow the same few steps:
#
# - Initialize the pretrained model
# - Reshape the final layer(s) to have the same number of outputs as the
# number of classes in the new dataset
# - Define for the optimization algorithm which parameters we want to
# update during training
# - Run the training step
#
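#
# As a minimal, illustrative sketch (not the configurable version built later in
# this script), finetuning a single model would look roughly like:
#
#   model = models.resnet18(pretrained=True)
#   model.fc = nn.Linear(model.fc.in_features, num_classes)  # reshape the output layer
#   optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
#   # ...then run a standard train/validate loop with a cross-entropy criterion
#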
from __future__ import print_function
from __future__ import division
import sys
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from jpeg_layer import *
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
######################################################################
# Inputs
# ------
#
# Here are all of the parameters to change for the run. We will use the
# *hymenoptera_data* dataset which can be downloaded
# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`__.
# This dataset contains two classes, **bees** and **ants**, and is
# structured such that we can use the
# `ImageFolder <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder>`__
# dataset, rather than writing our own custom dataset. Download the data
# and set the ``data_dir`` input to the root directory of the dataset. The
# ``model_name`` input is the name of the model you wish to use and must
# be selected from this list:
#
# ::
#
# [resnet, alexnet, vgg, squeezenet, densenet, inception]
#
# The other inputs are as follows: ``num_classes`` is the number of
# classes in the dataset, ``batch_size`` is the batch size used for
# training and may be adjusted according to the capability of your
# machine, ``num_epochs`` is the number of training epochs we want to run,
# and ``feature_extract`` is a boolean that defines if we are finetuning
# or feature extracting. If ``feature_extract = False``, the model is
# finetuned and all model parameters are updated. If
# ``feature_extract = True``, only the last layer parameters are updated,
# the others remain fixed.
#
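#
# A typical invocation with these inputs spelled out on the command line might look
# like the following (hypothetical paths; adjust to your own setup):
#
#   python cnn.py --data_dir /data/jenna/data/ --model_name squeezenet \
#       --num_classes 3 --batch_size 8 --num_epochs 25
#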
parser = argparse.ArgumentParser(description = \
'Neural Network with JpegLayer')
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
#data_dir = "./hymenoptera_data"
parser.add_argument('--data_dir', '-d', type=str,\
default='/data/jenna/data/', \
help='Directory of the input data. \
String. Default: /data/jenna/data/')
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
#model_name = "squeezenet"
parser.add_argument('--model_name', '-m', type=str,\
default='squeezenet',\
help = 'NN models to choose from [resnet, alexnet, \
vgg, squeezenet, densenet, inception]. \
String. Default: squeezenet')
# Number of classes in the dataset
#num_classes = 3
parser.add_argument('--num_classes', '-c', type=int,\
default = 3,\
help = 'Number of classes in the dataset. \
Integer. Default: 3')
# Batch size for training (change depending on how much memory you have)
#batch_size = 8
parser.add_argument('--batch_size', '-b', type=int,\
default = 8,\
help = 'Batch size for training (can change depending\
on how much memory you have. \
Integer. Default: 8)')
# Number of epochs to train for
#num_epochs = 25
parser.add_argument('-ep', '--num_epochs', type=int,\
default = 25,\
help = 'Number of epochs to train for. \
Integer. Default:25')
#Flag for whether to add jpeg layer to train quantization matrix
#add_jpeg_layer = True
parser.add_argument('--add_jpeg_layer', '-jpeg', \
action = 'store_false',\
help = 'Flag for adding jpeg layer to neural network. \
Bool. Default: True')
#Flag for initialization of the quantization table. When true, qtable is uniformly random. When false, qtable is the jpeg standard.
parser.add_argument('--rand_qtable', '-rq', \
action = 'store_false',\
help='Flag for initialization of the quantization table. \
When true, qtable is uniformly random. When false, \
qtable is the jpeg standard.\
Bool. Default: True.')
# Flag for printing trained quantization matrix
parser.add_argument('--qtable', '-q', \
action = 'store_true',\
help = 'Flag for printing the trained quantization matrix. \
Bool. Default: False.')
#Flag for visualizing the jpeg layer
parser.add_argument('--visualize', '-v',\
action = 'store_false',\
help = 'Flag for visualizing the jpeg layer. \
Bool. Default: True')
#Flag for regularizing the magnitude of the quantization table
#regularize = True
parser.add_argument('--regularize','-r',\
action = 'store_false',\
help = 'Flag for regularizing the magnitude of the \
quantization table. Without this term, the quantization \
table goes to 0. \
Bool. Default: True')
#JPEG quality. Used to calculate a scaling factor for the qtable, resulting in different compression rates.
parser.add_argument('--quality', type = int,\
default = 50,\
help = 'Jpeg quality. It is used to calculate \
a quality factor for different compression rates. \
Integer. Default: 50')
parser.add_argument('--quant_only', action = 'store_true')
parser.add_argument('--cnn_only', action = 'store_true')
feature_extract = False
#parse the inputs
args,unparsed = parser.parse_known_args()
print(args)
######################################################################
# Helper Functions
# ----------------
#
# Before we write the code for adjusting the models, lets define a few
# helper functions.
#
# Model Training and Validation Code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The ``train_model`` function handles the training and validation of a
# given model. As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accommodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases =['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
# add a regularization term on the learned quantization table:
# factor / L1(qtable, 0) grows as the table shrinks toward zero, which keeps
# its entries from collapsing to 0 (see the --regularize flag above)
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
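# A typical call might look like the following sketch (dataloaders_dict, criterion and
# optimizer are assumed to be constructed further below in this script):
#
#   model, hist = train_model(model, dataloaders_dict, criterion, optimizer,
#                             num_epochs=args.num_epochs,
#                             is_inception=(args.model_name == "inception"))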
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# times an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer and by default the new parameters have ``.requires_grad=True``
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .requires_grad flags set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalization layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# Squeezenet
# ~~~~~~~~~~
#
# The Squeezenet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet; we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5)
# (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1))
# (2): ReLU(inplace)
# (3): AvgPool2d(kernel_size=13, stride=1, padding=0)
# )
#
# To modify the network, we reinitialize the Conv2d layer to have an
# output feature map of depth 2 as
#
# ::
#
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
#
# Densenet
# ~~~~~~~~
#
# Densenet was introduced in the paper `Densely Connected Convolutional
# Networks <https://arxiv.org/abs/1608.06993>`__. Torchvision has four
# variants of Densenet but here we only use Densenet-121. The output layer
# is a linear layer with 1024 input features:
#
# ::
#
# (classifier): Linear(in_features=1024, out_features=1000, bias=True)
#
# To reshape the network, we reinitialize the classifier’s linear layer as
#
# ::
#
# model.classifier = nn.Linear(1024, num_classes)
#
# Inception v3
# ~~~~~~~~~~~~
#
# Finally, Inception v3 was first described in `Rethinking the Inception
# Architecture for Computer
# Vision <https://arxiv.org/pdf/1512.00567v1.pdf>`__. This network is
# unique because it has two output layers when training. The second output
# is known as an auxiliary output and is contained in the AuxLogits part
# of the network. The primary output is a linear layer at the end of the
# network. Note, when testing we only consider the primary output. The
# auxiliary output and primary output of the loaded model are printed as:
#
# ::
#
# (AuxLogits): InceptionAux(
# ...
# (fc): Linear(in_features=768, out_features=1000, bias=True)
# )
# ...
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
#
# To finetune this model we must reshape both layers. This is accomplished
# with the following
#
# ::
#
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
#
# Notice, many of the models have similar output structures, but each must
# be handled slightly differently. Also, check out the printed model
# architecture of the reshaped network and make sure the number of output
# features is the same as the number of classes in the dataset.
#
def initialize_model(model_name, num_classes, feature_extract = False, add_jpeg_layer = False, train_quant_only = False, train_cnn_only=False, rand_qtable = True, quality = 50, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 448
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
if add_jpeg_layer:
if train_quant_only and not train_cnn_only:
model_ft.load_state_dict(torch.load("model.final"))
# if loadfull:
# model_ft.load_state_dict(torch.load("model.final"))
# model_ft = model_ft[1]
model_ft = nn.Sequential(JpegLayer( \
rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
model_ft)
set_parameter_requires_grad(model_ft,\
False, feature_extract,
train_quant_only, train_cnn_only)
# model_ft.load_state_dict(torch.load("model.fail"))
return model_ft, input_size
# Initialize the model for this run
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print('\t',name)
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
# Observe that all parameters are being optimized
#if train_quant_only:
#optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
#else:
optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
optimizer_ft = None
# optim.SGD([{'params': params_to_update},\
# {'params': params_quantize, 'lr': 0.005, 'momentum':0.9}], lr=0.0005, momentum=0.9)
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to set up the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
if args.cnn_only == True and iftrain:
torch.save(model_ft.state_dict(), "model.final")
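# Note: "model.final" is the same checkpoint that a later --quant_only run
# reloads inside initialize_model (torch.load("model.final")), so the
# quantization table can then be trained on top of the already finetuned CNN.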
#print the trained quantization matrix
if args.qtable:
print('--------- the trained quantize table ---------')
for name,param in model_ft.named_parameters():
if param.requi | isualize feature maps after jpeg layer
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
if args.add_jpeg_layer:
activation = {}
model_ft[0].register_forward_hook(get_activation('0.JpegLayer'))
data, _ = image_datasets["val"][0]
f1 = data.cpu().data.numpy()
f1 = (np.transpose(f1,(1,2,0))*255).astype(np.uint8)
data.unsqueeze_(0)
output = model_ft(data.to(device))
f2 = activation['0.JpegLayer'].squeeze().cpu().data.numpy()
f2 = (np.transpose(f2, (1,2,0))*255).astype(np.uint8)
if args.visualize:
fig, axarr = plt.subplots(2)
axarr[0].imshow(f1)
axarr[1].imshow(f2)
plt.show()
#save images
from psnr import psnr, compressJ, save
from PIL import Image
save(f1, "org.bmp")
save(f2, "myJpeg.jpg")
###############################
##### standard python jpeg ####
###############################
#im = compressJ(f1,"toJpeg.jpg")
#im = np.array(im, np.int16).transpose(2,0,1)
#
##############################
##### psnr ####
##############################
#f1 = np.array(f1,np.int16).transpose(2,0,1)
#f2 = np.array(f2,np.int16).transpose(2,0,1)
#print("compression results!")
#print("PSNR - my jpeg: ", psnr(f2[0],f1[0]))
#print("PSNR - PIL jpeg", psnr(im[0], f1[0]))
#print("PSNR - my vs. PIL", psnr(im[0], f2[0]))
#######################################################################
## Comparison with Model Trained from Scratch
## ------------------------------------------
##
## Just for fun, let's see how the model learns if we do not use transfer
## learning. The performance of finetuning vs. feature extracting depends
## largely on the dataset but in general both transfer learning methods
## produce favorable results in terms of training time and overall accuracy
## versus a model trained from scratch.
##
#
#
## Initialize the non-pretrained version of the model used for this run
#scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
#scratch_model = scratch_model.to(device)
#scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
#scratch_criterion = nn.CrossEntropyLoss()
#_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
#
## Plot the training curves of validation accuracy vs. number
## of training epochs for the transfer learning method and
## the model trained from scratch
#ohist = []
#shist = []
#
#ohist = [h.cpu().numpy() for h in hist]
#shist = [h.cpu().numpy() for h in scratch_hist]
#
#plt.title("Validation Accuracy vs. Number of Training Epochs")
#plt.xlabel("Training Epochs")
#plt.ylabel("Validation Accuracy")
#plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
#plt.plot(range(1,num_epochs+1),shist,label="Scratch")
#plt.ylim((0,1.))
#plt.xticks(np.arange(1, num_epochs+1, 1.0))
#plt.legend()
#plt.show()
#
######################################################################
# Final Thoughts and Where to Go Next
# -----------------------------------
#
# Try running some of the other models and see how good the accuracy gets.
# Also, notice that feature extracting takes less time because in the
# backward pass we do not have to calculate most of the gradients. There
# are many places to go from here. You could:
#
# - Run this code with a harder dataset and see some more benefits of
# transfer learning
# - Using the methods described here, use transfer learning to update a
# different model, perhaps in a new domain (i.e. NLP, audio, etc.)
# - Once you are happy with a model, you can export it as an ONNX model,
# or trace it using the hybrid frontend for more speed and optimization
# opportunities.
#
| res_grad == True and\
name == "0.quantize":
print('Y',param.data[0]*255)
print('Cb',param.data[1]*255)
print('Cr',param.data[2]*255)
break
# Let's v | conditional_block |
cnn.py | """
Finetuning Torchvision Models
=============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# In this tutorial we will take a deeper look at how to finetune and
# feature extract the `torchvision
# models <https://pytorch.org/docs/stable/torchvision/models.html>`__, all
# of which have been pretrained on the 1000-class Imagenet dataset. This
# tutorial will give an in-depth look at how to work with several modern
# CNN architectures, and will build an intuition for finetuning any
# PyTorch model. Since each model architecture is different, there is no
# boilerplate finetuning code that will work in all scenarios. Rather, the
# researcher must look at the existing architecture and make custom
# adjustments for each model.
#
# In this document we will perform two types of transfer learning:
# finetuning and feature extraction. In **finetuning**, we start with a
# pretrained model and update *all* of the model’s parameters for our new
# task, in essence retraining the whole model. In **feature extraction**,
# we start with a pretrained model and only update the final layer weights
# from which we derive predictions. It is called feature extraction
# because we use the pretrained CNN as a fixed feature-extractor, and only
# change the output layer. For more technical information about transfer
# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
# `here <https://ruder.io/transfer-learning/>`__.
#
# In general both transfer learning methods follow the same few steps:
#
# - Initialize the pretrained model
# - Reshape the final layer(s) to have the same number of outputs as the
# number of classes in the new dataset
# - Define for the optimization algorithm which parameters we want to
# update during training
# - Run the training step
#
from __future__ import print_function
from __future__ import division
import sys
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from jpeg_layer import *
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
######################################################################
# Inputs
# ------
#
# Here are all of the parameters to change for the run. We will use the
# *hymenoptera_data* dataset which can be downloaded
# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`__.
# This dataset contains two classes, **bees** and **ants**, and is
# structured such that we can use the
# `ImageFolder <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder>`__
# dataset, rather than writing our own custom dataset. Download the data
# and set the ``data_dir`` input to the root directory of the dataset. The
# ``model_name`` input is the name of the model you wish to use and must
# be selected from this list:
#
# ::
#
# [resnet, alexnet, vgg, squeezenet, densenet, inception]
#
# The other inputs are as follows: ``num_classes`` is the number of
# classes in the dataset, ``batch_size`` is the batch size used for
# training and may be adjusted according to the capability of your
# machine, ``num_epochs`` is the number of training epochs we want to run,
# and ``feature_extract`` is a boolean that defines if we are finetuning
# or feature extracting. If ``feature_extract = False``, the model is
# finetuned and all model parameters are updated. If
# ``feature_extract = True``, only the last layer parameters are updated,
# the others remain fixed.
#
parser = argparse.ArgumentParser(description = \
'Neural Network with JpegLayer')
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
#data_dir = "./hymenoptera_data"
parser.add_argument('--data_dir', '-d', type=str,\
default='/data/jenna/data/', \
help='Directory of the input data. \
String. Default: /data/jenna/data/')
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
#model_name = "squeezenet"
parser.add_argument('--model_name', '-m', type=str,\
default='squeezenet',\
help = 'NN models to choose from [resnet, alexnet, \
vgg, squeezenet, densenet, inception]. \
String. Default: squeezenet')
# Number of classes in the dataset
#num_classes = 3
parser.add_argument('--num_classes', '-c', type=int,\
default = 3,\
help = 'Number of classes in the dataset. \
Integer. Default: 3')
# Batch size for training (change depending on how much memory you have)
#batch_size = 8
parser.add_argument('--batch_size', '-b', type=int,\
default = 8,\
help = 'Batch size for training (can change depending\
on how much memory you have. \
Integer. Default: 8)')
# Number of epochs to train for
#num_epochs = 25
parser.add_argument('-ep', '--num_epochs', type=int,\
default = 25,\
help = 'Number of epochs to train for. \
Integer. Default:25')
#Flag for whether to add jpeg layer to train quantization matrix
#add_jpeg_layer = True
parser.add_argument('--add_jpeg_layer', '-jpeg', \
action = 'store_false',\
help = 'Flag for adding jpeg layer to neural network. \
Bool. Default: True')
#Flag for initialization of the quantization table. When true, qtable is uniformly random. When false, qtable is the jpeg standard.
parser.add_argument('--rand_qtable', '-rq', \
action = 'store_false',\
help='Flag for initialization for quantization table. \
When true, qtable is uniformly random. When false, \
qtable is jpeg standard.\
Bool. Default: True.')
# Flag for printing trained quantization matrix
parser.add_argument('--qtable', '-q', \
action = 'store_true',\
help = 'Flag for printing the quantization matrix. \
Bool. Default: False.')
#Flag for visualizing the jpeg layer
parser.add_argument('--visualize', '-v',\
action = 'store_false',\
help = 'Flag for visualizing the jpeg layer. \
Bool. Default: True')
#Flag for regularize the magnitude of quantization table
#regularize = True
parser.add_argument('--regularize','-r',\
action = 'store_false',\
help = 'Flag for regularizing the magnitude of the \
quantization table. Without the term, the quantization \
table goes to 0. \
Bool. Default: True')
#Jpeg quality. Used to calculate a scaling factor for the qtable, resulting in different compression rates.
parser.add_argument('--quality', type = int,\
default = 50,\
help = 'Jpeg quality. It is used to calculate \
a quality factor for different compression rates. \
Integer. Default: 50')
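# The quality-to-scaling mapping itself lives in jpeg_layer.py and is not
# shown here. As a sketch, the conventional libjpeg rule (which this option
# presumably follows) scales the base quantization table like this:
#
# ::
#
#     scale = 5000 / quality if quality < 50 else 200 - 2 * quality
#     qtable_scaled = (qtable_base * scale + 50) / 100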
parser.add_argument('--quant_only', action = 'store_true')
parser.add_argument('--cnn_only', action = 'store_true')
feature_extract = False
#parse the inputs
args,unparsed = parser.parse_known_args()
print(args)
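# As a usage sketch, a run that finetunes squeezenet with the jpeg layer and
# prints the learned quantization table might look like this (the data path
# and values are placeholders):
#
# ::
#
#     python cnn.py -d ./hymenoptera_data -m squeezenet -c 2 -b 8 -ep 25 -q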
######################################################################
# Helper Functions
# ----------------
#
# Before we write the code for adjusting the models, let's define a few
# helper functions.
#
# Model Training and Validation Code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The ``train_model`` function handles the training and validation of a
# given model. As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accomodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
si |
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer and by default the new parameters have ``.requires_grad=True``
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .requires_grad flags set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalization layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# Squeezenet
# ~~~~~~~~~~
#
# The Squeezenet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet; we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5)
# (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1))
# (2): ReLU(inplace)
# (3): AvgPool2d(kernel_size=13, stride=1, padding=0)
# )
#
# To modify the network, we reinitialize the Conv2d layer to have an
# output feature map of depth 2 as
#
# ::
#
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
#
# Densenet
# ~~~~~~~~
#
# Densenet was introduced in the paper `Densely Connected Convolutional
# Networks <https://arxiv.org/abs/1608.06993>`__. Torchvision has four
# variants of Densenet but here we only use Densenet-121. The output layer
# is a linear layer with 1024 input features:
#
# ::
#
# (classifier): Linear(in_features=1024, out_features=1000, bias=True)
#
# To reshape the network, we reinitialize the classifier’s linear layer as
#
# ::
#
# model.classifier = nn.Linear(1024, num_classes)
#
# Inception v3
# ~~~~~~~~~~~~
#
# Finally, Inception v3 was first described in `Rethinking the Inception
# Architecture for Computer
# Vision <https://arxiv.org/pdf/1512.00567v1.pdf>`__. This network is
# unique because it has two output layers when training. The second output
# is known as an auxiliary output and is contained in the AuxLogits part
# of the network. The primary output is a linear layer at the end of the
# network. Note, when testing we only consider the primary output. The
# auxiliary output and primary output of the loaded model are printed as:
#
# ::
#
# (AuxLogits): InceptionAux(
# ...
# (fc): Linear(in_features=768, out_features=1000, bias=True)
# )
# ...
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
#
# To finetune this model we must reshape both layers. This is accomplished
# with the following
#
# ::
#
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
#
# Notice, many of the models have similar output structures, but each must
# be handled slightly differently. Also, check out the printed model
# architecture of the reshaped network and make sure the number of output
# features is the same as the number of classes in the dataset.
#
def initialize_model(model_name, num_classes, feature_extract = False, add_jpeg_layer = False, train_quant_only = False, train_cnn_only=False, rand_qtable = True, quality = 50, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 448
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
if add_jpeg_layer:
if train_quant_only and not train_cnn_only:
model_ft.load_state_dict(torch.load("model.final"))
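# "model.final" presumably holds the weights saved by an earlier --cnn_only
# run (see the torch.save(..., "model.final") call near the end of this
# script), so a quant-only run starts from an already finetuned backbone.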
# if loadfull:
# model_ft.load_state_dict(torch.load("model.final"))
# model_ft = model_ft[1]
model_ft = nn.Sequential(JpegLayer( \
rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
model_ft)
set_parameter_requires_grad(model_ft,\
False, feature_extract,
train_quant_only, train_cnn_only)
# model_ft.load_state_dict(torch.load("model.fail"))
return model_ft, input_size
# Initialize the model for this run
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print('\t',name)
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
# Observe that all parameters are being optimized
#if train_quant_only:
#optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
#else:
optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
optimizer_ft = None
# optim.SGD([{'params': params_to_update},\
# {'params': params_quantize, 'lr': 0.005, 'momentum':0.9}], lr=0.0005, momentum=0.9)
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to set up the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
if args.cnn_only == True and iftrain:
torch.save(model_ft.state_dict(), "model.final")
#print the trained quantization matrix
if args.qtable:
print('--------- the trained quantize table ---------')
for name,param in model_ft.named_parameters():
if param.requires_grad == True and\
name == "0.quantize":
print('Y',param.data[0]*255)
print('Cb',param.data[1]*255)
print('Cr',param.data[2]*255)
break
# Let's visualize feature maps after jpeg layer
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
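# get_activation returns a forward hook that stashes a module's output in the
# ``activation`` dict; registering it on model_ft[0] below captures the
# JpegLayer's reconstructed image during the forward pass.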
if args.add_jpeg_layer:
activation = {}
model_ft[0].register_forward_hook(get_activation('0.JpegLayer'))
data, _ = image_datasets["val"][0]
f1 = data.cpu().data.numpy()
f1 = (np.transpose(f1,(1,2,0))*255).astype(np.uint8)
data.unsqueeze_(0)
output = model_ft(data.to(device))
f2 = activation['0.JpegLayer'].squeeze().cpu().data.numpy()
f2 = (np.transpose(f2, (1,2,0))*255).astype(np.uint8)
if args.visualize:
fig, axarr = plt.subplots(2)
axarr[0].imshow(f1)
axarr[1].imshow(f2)
plt.show()
#save images
from psnr import psnr, compressJ, save
from PIL import Image
save(f1, "org.bmp")
save(f2, "myJpeg.jpg")
###############################
##### standard python jpeg ####
###############################
#im = compressJ(f1,"toJpeg.jpg")
#im = np.array(im, np.int16).transpose(2,0,1)
#
##############################
##### psnr ####
##############################
#f1 = np.array(f1,np.int16).transpose(2,0,1)
#f2 = np.array(f2,np.int16).transpose(2,0,1)
#print("compression results!")
#print("PSNR - my jpeg: ", psnr(f2[0],f1[0]))
#print("PSNR - PIL jpeg", psnr(im[0], f1[0]))
#print("PSNR - my vs. PIL", psnr(im[0], f2[0]))
#######################################################################
## Comparison with Model Trained from Scratch
## ------------------------------------------
##
## Just for fun, let's see how the model learns if we do not use transfer
## learning. The performance of finetuning vs. feature extracting depends
## largely on the dataset but in general both transfer learning methods
## produce favorable results in terms of training time and overall accuracy
## versus a model trained from scratch.
##
#
#
## Initialize the non-pretrained version of the model used for this run
#scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
#scratch_model = scratch_model.to(device)
#scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
#scratch_criterion = nn.CrossEntropyLoss()
#_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
#
## Plot the training curves of validation accuracy vs. number
## of training epochs for the transfer learning method and
## the model trained from scratch
#ohist = []
#shist = []
#
#ohist = [h.cpu().numpy() for h in hist]
#shist = [h.cpu().numpy() for h in scratch_hist]
#
#plt.title("Validation Accuracy vs. Number of Training Epochs")
#plt.xlabel("Training Epochs")
#plt.ylabel("Validation Accuracy")
#plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
#plt.plot(range(1,num_epochs+1),shist,label="Scratch")
#plt.ylim((0,1.))
#plt.xticks(np.arange(1, num_epochs+1, 1.0))
#plt.legend()
#plt.show()
#
######################################################################
# Final Thoughts and Where to Go Next
# -----------------------------------
#
# Try running some of the other models and see how good the accuracy gets.
# Also, notice that feature extracting takes less time because in the
# backward pass we do not have to calculate most of the gradients. There
# are many places to go from here. You could:
#
# - Run this code with a harder dataset and see some more benefits of
# transfer learning
# - Using the methods described here, use transfer learning to update a
# different model, perhaps in a new domain (i.e. NLP, audio, etc.)
# - Once you are happy with a model, you can export it as an ONNX model,
# or trace it using the hybrid frontend for more speed and optimization
# opportunities.
#
| nce = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases =['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
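# The penalty is the reciprocal of the mean |q| of the quantization table,
# so it grows as the table shrinks toward zero; this is what --regularize
# is meant to prevent (without it the table collapses to 0).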
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
| identifier_body |
cnn.py | """
Finetuning Torchvision Models
=============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# In this tutorial we will take a deeper look at how to finetune and
# feature extract the `torchvision
# models <https://pytorch.org/docs/stable/torchvision/models.html>`__, all
# of which have been pretrained on the 1000-class Imagenet dataset. This
# tutorial will give an in-depth look at how to work with several modern
# CNN architectures, and will build an intuition for finetuning any
# PyTorch model. Since each model architecture is different, there is no
# boilerplate finetuning code that will work in all scenarios. Rather, the
# researcher must look at the existing architecture and make custom
# adjustments for each model.
#
# In this document we will perform two types of transfer learning:
# finetuning and feature extraction. In **finetuning**, we start with a
# pretrained model and update *all* of the model’s parameters for our new
# task, in essence retraining the whole model. In **feature extraction**,
# we start with a pretrained model and only update the final layer weights
# from which we derive predictions. It is called feature extraction
# because we use the pretrained CNN as a fixed feature-extractor, and only
# change the output layer. For more technical information about transfer
# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
# `here <https://ruder.io/transfer-learning/>`__.
#
# In general both transfer learning methods follow the same few steps:
#
# - Initialize the pretrained model
# - Reshape the final layer(s) to have the same number of outputs as the
# number of classes in the new dataset
# - Define for the optimization algorithm which parameters we want to
# update during training
# - Run the training step
#
from __future__ import print_function
from __future__ import division
import sys
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from jpeg_layer import *
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
######################################################################
# Inputs
# ------
#
# Here are all of the parameters to change for the run. We will use the
# *hymenoptera_data* dataset which can be downloaded
# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`__.
# This dataset contains two classes, **bees** and **ants**, and is
# structured such that we can use the
# `ImageFolder <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder>`__
# dataset, rather than writing our own custom dataset. Download the data
# and set the ``data_dir`` input to the root directory of the dataset. The
# ``model_name`` input is the name of the model you wish to use and must
# be selected from this list:
#
# ::
#
# [resnet, alexnet, vgg, squeezenet, densenet, inception]
#
# The other inputs are as follows: ``num_classes`` is the number of
# classes in the dataset, ``batch_size`` is the batch size used for
# training and may be adjusted according to the capability of your
# machine, ``num_epochs`` is the number of training epochs we want to run,
# and ``feature_extract`` is a boolean that defines if we are finetuning
# or feature extracting. If ``feature_extract = False``, the model is
# finetuned and all model parameters are updated. If
# ``feature_extract = True``, only the last layer parameters are updated,
# the others remain fixed.
#
parser = argparse.ArgumentParser(description = \
'Neural Network with JpegLayer')
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
#data_dir = "./hymenoptera_data"
parser.add_argument('--data_dir', '-d', type=str,\
default='/data/jenna/data/', \
help='Directory of the input data. \
String. Default: /data/jenna/data/')
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
#model_name = "squeezenet"
parser.add_argument('--model_name', '-m', type=str,\
default='squeezenet',\
help = 'NN models to choose from [resnet, alexnet, \
vgg, squeezenet, densenet, inception]. \
String. Default: squeezenet')
# Number of classes in the dataset
#num_classes = 3
parser.add_argument('--num_classes', '-c', type=int,\
default = 3,\
help = 'Number of classes in the dataset. \
Integer. Default: 3')
# Batch size for training (change depending on how much memory you have)
#batch_size = 8
parser.add_argument('--batch_size', '-b', type=int,\
default = 8,\
help = 'Batch size for training (can change depending\
on how much memory you have. \
Integer. Default: 8)')
# Number of epochs to train for
#num_epochs = 25
parser.add_argument('-ep', '--num_epochs', type=int,\
default = 25,\
help = 'Number of epochs to train for. \
Integer. Default:25')
#Flag for whether to add jpeg layer to train quantization matrix
#add_jpeg_layer = True
parser.add_argument('--add_jpeg_layer', '-jpeg', \
action = 'store_false',\
help = 'Flag for adding jpeg layer to neural network. \
Bool. Default: True')
#Flag for initialization of the quantization table. When true, qtable is uniformly random. When false, qtable is the jpeg standard.
parser.add_argument('--rand_qtable', '-rq', \
action = 'store_false',\
help='Flag for initialization for quantization table. \
When true, qtable is uniformly random. When false, \
qtable is jpeg standard.\
Bool. Default: True.')
# Flag for printing trained quantization matrix
parser.add_argument('--qtable', '-q', \
action = 'store_true',\
help = 'Flag for printing the quantization matrix. \
Bool. Default: False.')
#Flag for visualizing the jpeg layer
parser.add_argument('--visualize', '-v',\
action = 'store_false',\
help = 'Flag for visualizing the jpeg layer. \
Bool. Default: True')
#Flag for regularize the magnitude of quantization table
#regularize = True
parser.add_argument('--regularize','-r',\
action = 'store_false',\
help = 'Flag for regularizing the magnitude of the \
quantization table. Without the term, the quantization \
table goes to 0. \
Bool. Default: True')
#Jpeg quality. Used to calculate a scaling factor for the qtable, resulting in different compression rates.
parser.add_argument('--quality', type = int,\
default = 50,\
help = 'Jpeg quality. It is used to calculate \
a quality factor for different compression rates. \
Integer. Default: 50')
parser.add_argument('--quant_only', action = 'store_true')
parser.add_argument('--cnn_only', action = 'store_true')
feature_extract = False
#parse the inputs
args,unparsed = parser.parse_known_args()
print(args)
######################################################################
# Helper Functions
# ----------------
#
# Before we write the code for adjusting the models, let's define a few
# helper functions.
#
# Model Training and Validation Code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The ``train_model`` function handles the training and validation of a
# given model. As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accommodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
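# A sketch of a typical call, mirroring the invocation later in this script:
#
# ::
#
#     model_ft, hist = train_model(model_ft, dataloaders_dict, criterion,
#                                  optimizer_ft, num_epochs=args.num_epochs,
#                                  is_inception=(args.model_name == "inception"))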
def tr | odel, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases =['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
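# How the freeze flags interact, as a quick sketch (illustrative only):
#
# ::
#
#     # first=True together with feature_extract=True freezes every parameter
#     set_parameter_requires_grad(model, True, True)
#     # quant_only=True freezes everything except parameters whose name
#     # contains "quantize" (the JpegLayer quantization tables)
#     set_parameter_requires_grad(model, False, False, quant_only=True)
#     # cnn_only=True does the opposite and freezes only the "quantize" parameters
#     set_parameter_requires_grad(model, False, False, cnn_only=True)
#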
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# times an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer, the new parameters have ``.requires_grad=True`` by default,
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .requires_grad attributes set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalization layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# Squeezenet
# ~~~~~~~~~~
#
# The Squeezenet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet; we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5)
# (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1))
# (2): ReLU(inplace)
# (3): AvgPool2d(kernel_size=13, stride=1, padding=0)
# )
#
# To modify the network, we reinitialize the Conv2d layer to have an
# output feature map of depth 2 as
#
# ::
#
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
#
# Densenet
# ~~~~~~~~
#
# Densenet was introduced in the paper `Densely Connected Convolutional
# Networks <https://arxiv.org/abs/1608.06993>`__. Torchvision has four
# variants of Densenet but here we only use Densenet-121. The output layer
# is a linear layer with 1024 input features:
#
# ::
#
# (classifier): Linear(in_features=1024, out_features=1000, bias=True)
#
# To reshape the network, we reinitialize the classifier’s linear layer as
#
# ::
#
# model.classifier = nn.Linear(1024, num_classes)
#
# Inception v3
# ~~~~~~~~~~~~
#
# Finally, Inception v3 was first described in `Rethinking the Inception
# Architecture for Computer
# Vision <https://arxiv.org/pdf/1512.00567v1.pdf>`__. This network is
# unique because it has two output layers when training. The second output
# is known as an auxiliary output and is contained in the AuxLogits part
# of the network. The primary output is a linear layer at the end of the
# network. Note, when testing we only consider the primary output. The
# auxiliary output and primary output of the loaded model are printed as:
#
# ::
#
# (AuxLogits): InceptionAux(
# ...
# (fc): Linear(in_features=768, out_features=1000, bias=True)
# )
# ...
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
#
# To finetune this model we must reshape both layers. This is accomplished
# with the following
#
# ::
#
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
#
# Notice, many of the models have similar output structures, but each must
# be handled slightly differently. Also, check out the printed model
# architecture of the reshaped network and make sure the number of output
# features is the same as the number of classes in the dataset.
#
def initialize_model(model_name, num_classes, feature_extract = False, add_jpeg_layer = False, train_quant_only = False, train_cnn_only=False, rand_qtable = True, quality = 50, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 448
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
if add_jpeg_layer:
if train_quant_only and not train_cnn_only:
model_ft.load_state_dict(torch.load("model.final"))
# if loadfull:
# model_ft.load_state_dict(torch.load("model.final"))
# model_ft = model_ft[1]
model_ft = nn.Sequential(JpegLayer( \
rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
model_ft)
set_parameter_requires_grad(model_ft,\
False, feature_extract,
train_quant_only, train_cnn_only)
# model_ft.load_state_dict(torch.load("model.fail"))
return model_ft, input_size
# Initialize the model for this run
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
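# Note: unlike the stock tutorial, the ImageNet Normalize transform is left
# commented out above, presumably because the JpegLayer expects raw [0, 1]
# pixel values. This is an inference from the surrounding code, not a
# statement by the original author.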
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print('\t',name)
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
# Observe that all parameters are being optimized
#if train_quant_only:
#optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
#else:
optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
optimizer_ft = None
# optim.SGD([{'params': params_to_update},\
# {'params': params_quantize, 'lr': 0.005, 'momentum':0.9}], lr=0.0005, momentum=0.9)
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to setup the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
if args.cnn_only == True and iftrain:
torch.save(model_ft.state_dict(), "model.final")
#print the trained quantization matrix
if args.qtable:
print('--------- the trained quantize table ---------')
for name,param in model_ft.named_parameters():
if param.requires_grad == True and\
name == "0.quantize":
print('Y',param.data[0]*255)
print('Cb',param.data[1]*255)
print('Cr',param.data[2]*255)
break
# Let's visualize feature maps after jpeg layer
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
if args.add_jpeg_layer:
activation = {}
model_ft[0].register_forward_hook(get_activation('0.JpegLayer'))
data, _ = image_datasets["val"][0]
f1 = data.cpu().data.numpy()
f1 = (np.transpose(f1,(1,2,0))*255).astype(np.uint8)
data.unsqueeze_(0)
output = model_ft(data.to(device))
f2 = activation['0.JpegLayer'].squeeze().cpu().data.numpy()
f2 = (np.transpose(f2, (1,2,0))*255).astype(np.uint8)
if args.visualize:
fig, axarr = plt.subplots(2)
axarr[0].imshow(f1)
axarr[1].imshow(f2)
plt.show()
#save images
from psnr import psnr, compressJ, save
from PIL import Image
save(f1, "org.bmp")
save(f2, "myJpeg.jpg")
###############################
##### standard python jpeg ####
###############################
#im = compressJ(f1,"toJpeg.jpg")
#im = np.array(im, np.int16).transpose(2,0,1)
#
##############################
##### psnr ####
##############################
#f1 = np.array(f1,np.int16).transpose(2,0,1)
#f2 = np.array(f2,np.int16).transpose(2,0,1)
#print("compression results!")
#print("PSNR - my jpeg: ", psnr(f2[0],f1[0]))
#print("PSNR - PIL jpeg", psnr(im[0], f1[0]))
#print("PSNR - my vs. PIL", psnr(im[0], f2[0]))
#######################################################################
## Comparison with Model Trained from Scratch
## ------------------------------------------
##
## Just for fun, let's see how the model learns if we do not use transfer
## learning. The performance of finetuning vs. feature extracting depends
## largely on the dataset but in general both transfer learning methods
## produce favorable results in terms of training time and overall accuracy
## versus a model trained from scratch.
##
#
#
## Initialize the non-pretrained version of the model used for this run
#scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
#scratch_model = scratch_model.to(device)
#scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
#scratch_criterion = nn.CrossEntropyLoss()
#_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
#
## Plot the training curves of validation accuracy vs. number
## of training epochs for the transfer learning method and
## the model trained from scratch
#ohist = []
#shist = []
#
#ohist = [h.cpu().numpy() for h in hist]
#shist = [h.cpu().numpy() for h in scratch_hist]
#
#plt.title("Validation Accuracy vs. Number of Training Epochs")
#plt.xlabel("Training Epochs")
#plt.ylabel("Validation Accuracy")
#plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
#plt.plot(range(1,num_epochs+1),shist,label="Scratch")
#plt.ylim((0,1.))
#plt.xticks(np.arange(1, num_epochs+1, 1.0))
#plt.legend()
#plt.show()
#
######################################################################
# Final Thoughts and Where to Go Next
# -----------------------------------
#
# Try running some of the other models and see how good the accuracy gets.
# Also, notice that feature extracting takes less time because in the
# backward pass we do not have to calculate most of the gradients. There
# are many places to go from here. You could:
#
# - Run this code with a harder dataset and see some more benefits of
# transfer learning
# - Using the methods described here, use transfer learning to update a
# different model, perhaps in a new domain (i.e. NLP, audio, etc.)
# - Once you are happy with a model, you can export it as an ONNX model,
# or trace it using the hybrid frontend for more speed and optimization
# opportunities.
#
| ain_model(m | identifier_name |
cnn.py | """
Finetuning Torchvision Models
=============================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`__
"""
######################################################################
# In this tutorial we will take a deeper look at how to finetune and
# feature extract the `torchvision
# models <https://pytorch.org/docs/stable/torchvision/models.html>`__, all
# of which have been pretrained on the 1000-class Imagenet dataset. This
# tutorial will give an indepth look at how to work with several modern
# CNN architectures, and will build an intuition for finetuning any
# PyTorch model. Since each model architecture is different, there is no
# boilerplate finetuning code that will work in all scenarios. Rather, the
# researcher must look at the existing architecture and make custom
# adjustments for each model.
#
# In this document we will perform two types of transfer learning:
# finetuning and feature extraction. In **finetuning**, we start with a
# pretrained model and update *all* of the model’s parameters for our new
# task, in essence retraining the whole model. In **feature extraction**,
# we start with a pretrained model and only update the final layer weights
# from which we derive predictions. It is called feature extraction
# because we use the pretrained CNN as a fixed feature-extractor, and only
# change the output layer. For more technical information about transfer
# learning see `here <https://cs231n.github.io/transfer-learning/>`__ and
# `here <https://ruder.io/transfer-learning/>`__.
#
# In general both transfer learning methods follow the same few steps:
#
# - Initialize the pretrained model
# - Reshape the final layer(s) to have the same number of outputs as the
# number of classes in the new dataset
# - Define for the optimization algorithm which parameters we want to
# update during training
# - Run the training step
#
from __future__ import print_function
from __future__ import division
import sys
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
from jpeg_layer import *
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
######################################################################
# Inputs
# ------
#
# Here are all of the parameters to change for the run. We will use the
# *hymenoptera_data* dataset which can be downloaded
# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`__.
# This dataset contains two classes, **bees** and **ants**, and is
# structured such that we can use the
# `ImageFolder <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder>`__
# dataset, rather than writing our own custom dataset. Download the data
# and set the ``data_dir`` input to the root directory of the dataset. The
# ``model_name`` input is the name of the model you wish to use and must
# be selected from this list:
#
# ::
#
# [resnet, alexnet, vgg, squeezenet, densenet, inception]
#
# The other inputs are as follows: ``num_classes`` is the number of
# classes in the dataset, ``batch_size`` is the batch size used for
# training and may be adjusted according to the capability of your
# machine, ``num_epochs`` is the number of training epochs we want to run,
# and ``feature_extract`` is a boolean that defines if we are finetuning
# or feature extracting. If ``feature_extract = False``, the model is
# finetuned and all model parameters are updated. If
# ``feature_extract = True``, only the last layer parameters are updated,
# the others remain fixed.
#
parser = argparse.ArgumentParser(description = \
'Neural Network with JpegLayer')
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
#data_dir = "./hymenoptera_data"
parser.add_argument('--data_dir', '-d', type=str,\
default='/data/jenna/data/', \
help='Directory of the input data. \
String. Default: /data/jenna/data/')
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
#model_name = "squeezenet"
parser.add_argument('--model_name', '-m', type=str,\
default='squeezenet',\
help = 'NN models to choose from [resnet, alexnet, \
vgg, squeezenet, densenet, inception]. \
String. Default: squeezenet')
# Number of classes in the dataset
#num_classes = 3
parser.add_argument('--num_classes', '-c', type=int,\
default = 3,\
help = 'Number of classes in the dataset. \
Integer. Default: 3')
# Batch size for training (change depending on how much memory you have)
#batch_size = 8
parser.add_argument('--batch_size', '-b', type=int,\
default = 8,\
help = 'Batch size for training (can change depending\
on how much memory you have. \
Integer. Default: 8)')
# Number of epochs to train for
#num_epochs = 25
parser.add_argument('-ep', '--num_epochs', type=int,\
default = 25,\
help = 'Number of epochs to train for. \
Integer. Default:25')
#Flag for whether to add jpeg layer to train quantization matrix
#add_jpeg_layer = True
parser.add_argument('--add_jpeg_layer', '-jpeg', \
action = 'store_false',\
help = 'Flag for adding jpeg layer to neural network. \
Bool. Default: True')
#Flag for initialization for quantization table. When true,qtable is uniformly random. When false, qtable is jpeg standard.
parser.add_argument('--rand_qtable', '-rq', \
action = 'store_false',\
help='Flag for initialization for quantization table. \
When true,qtable is uniformly random. When false, \
qtable is jpeg standard.\
Bool. Default: True.')
# Flag for printing trained quantization matrix
parser.add_argument('--qtable', '-q', \
action = 'store_true',\
help = 'Flag for print quantization matrix. \
Bool. Default: False.')
#Flag for visualizing the jpeg layer
parser.add_argument('--visualize', '-v',\
action = 'store_false',\
help = 'Flag for visualizing the jpeg layer. \
Bool. Default: True')
#Flag for regularize the magnitude of quantization table
#regularize = True
parser.add_argument('--regularize','-r',\
action = 'store_false',\
help = 'Flag to regularize the magnitude of the \
quantization table. Without the term, the quantization \
table goes to 0 \
Bool. Default: True')
#Jpeg quality. To calculate a scaling factor for qtable and result in different compression rate.
parser.add_argument('--quality', type = int,\
default = 50,\
help = 'Jpeg quality. It is used to calculate \
a quality factor for different compression rate. \
Integer. Default: 50')
parser.add_argument('--quant_only', action = 'store_true')
parser.add_argument('--cnn_only', action = 'store_true')
feature_extract = False
#parse the inputs
args,unparsed = parser.parse_known_args()
print(args)
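# Example invocation (illustrative only; the data path and values shown are
# assumptions, not a prescribed configuration):
#
# ::
#
#     python cnn.py --data_dir /data/jenna/data/ --model_name squeezenet \
#         --num_classes 3 --batch_size 8 --num_epochs 25 --quality 50 --qtable
#
# Note that ``--add_jpeg_layer``, ``--rand_qtable``, ``--visualize`` and
# ``--regularize`` are ``store_false`` flags: they default to True, and passing
# them on the command line turns the corresponding behaviour *off*.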
######################################################################
# Helper Functions
# ----------------
#
# Before we write the code for adjusting the models, let's define a few
# helper functions.
#
# Model Training and Validation Code
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The ``train_model`` function handles the training and validation of a
# given model. As input, it takes a PyTorch model, a dictionary of
# dataloaders, a loss function, an optimizer, a specified number of epochs
# to train and validate for, and a boolean flag for when the model is an
# Inception model. The *is_inception* flag is used to accommodate the
# *Inception v3* model, as that architecture uses an auxiliary output and
# the overall model loss respects both the auxiliary output and the final
# output, as described
# `here <https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958>`__.
# The function trains for the specified number of epochs and after each
# epoch runs a full validation step. It also keeps track of the best
# performing model (in terms of validation accuracy), and at the end of
# training returns the best performing model. After each epoch, the
# training and validation accuracies are printed.
#
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, train = True):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
phases =['train', 'val']
if not train:
phases = ['val']
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in phases:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
if train:
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
#add regularization
reg_loss = 0
factor = 0.1
if args.regularize:
reg_crit = nn.L1Loss(size_average=True)
target = torch.Tensor(3,8,8).cuda()
target.fill_(0)
for name, param in model.named_parameters():
if "quantize" in name:
reg_loss = factor /reg_crit(param,target) * inputs.size(0)
break
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
loss = reg_loss + loss
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# if(epoch_acc < 0.5):
# for name, param in model.named_parameters():
# if 'quantize' in name:
# print(param*255)
# torch.save(model.state_dict(), 'model.fail')
# sys.exit(0)
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
######################################################################
# Set Model Parameters’ .requires_grad attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This helper function sets the ``.requires_grad`` attribute of the
# parameters in the model to False when we are feature extracting. By
# default, when we load a pretrained model all of the parameters have
# ``.requires_grad=True``, which is fine if we are training from scratch
# or finetuning. However, if we are feature extracting and only want to
# compute gradients for the newly initialized layer then we want all of
# the other parameters to not require gradients. This will make more sense
# later.
#
def set_parameter_requires_grad(model, first, feature_extract, quant_only=False, cnn_only=False):
if first and feature_extract:
quant_only = True
cnn_only = True
for name, param in model.named_parameters():
if (quant_only and 'quantize' not in name) or\
(cnn_only and 'quantize' in name):
param.requires_grad = False
######################################################################
# Initialize and Reshape the Networks
# -----------------------------------
#
# Now to the most interesting part. Here is where we handle the reshaping
# of each network. Note, this is not an automatic procedure and is unique
# to each model. Recall, the final layer of a CNN model, which is often
# times an FC layer, has the same number of nodes as the number of output
# classes in the dataset. Since all of the models have been pretrained on
# Imagenet, they all have output layers of size 1000, one node for each
# class. The goal here is to reshape the last layer to have the same
# number of inputs as before, AND to have the same number of outputs as
# the number of classes in the dataset. In the following sections we will
# discuss how to alter the architecture of each model individually. But
# first, there is one important detail regarding the difference between
# finetuning and feature-extraction.
#
# When feature extracting, we only want to update the parameters of the
# last layer, or in other words, we only want to update the parameters for
# the layer(s) we are reshaping. Therefore, we do not need to compute the
# gradients of the parameters that we are not changing, so for efficiency
# we set the .requires_grad attribute to False. This is important because
# by default, this attribute is set to True. Then, when we initialize the
# new layer, the new parameters have ``.requires_grad=True`` by default,
# so only the new layer’s parameters will be updated. When we are
# finetuning we can leave all of the .requires_grad attributes set to the default
# of True.
#
# Finally, notice that inception_v3 requires the input size to be
# (299,299), whereas all of the other models expect (224,224).
#
# Resnet
# ~~~~~~
#
# Resnet was introduced in the paper `Deep Residual Learning for Image
# Recognition <https://arxiv.org/abs/1512.03385>`__. There are several
# variants of different sizes, including Resnet18, Resnet34, Resnet50,
# Resnet101, and Resnet152, all of which are available from torchvision
# models. Here we use Resnet18, as our dataset is small and only has two
# classes. When we print the model, we see that the last layer is a fully
# connected layer as shown below:
#
# ::
#
# (fc): Linear(in_features=512, out_features=1000, bias=True)
#
# Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512
# input features and 2 output features with:
#
# ::
#
# model.fc = nn.Linear(512, num_classes)
#
# Alexnet
# ~~~~~~~
#
# Alexnet was introduced in the paper `ImageNet Classification with Deep
# Convolutional Neural
# Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`__
# and was the first very successful CNN on the ImageNet dataset. When we
# print the model architecture, we see the model output comes from the 6th
# layer of the classifier
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# To use the model with our dataset we reinitialize this layer as
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# VGG
# ~~~
#
# VGG was introduced in the paper `Very Deep Convolutional Networks for
# Large-Scale Image Recognition <https://arxiv.org/pdf/1409.1556.pdf>`__.
# Torchvision offers eight versions of VGG with various lengths and some
# that have batch normalization layers. Here we use VGG-11 with batch
# normalization. The output layer is similar to Alexnet, i.e.
#
# ::
#
# (classifier): Sequential(
# ...
# (6): Linear(in_features=4096, out_features=1000, bias=True)
# )
#
# Therefore, we use the same technique to modify the output layer
#
# ::
#
# model.classifier[6] = nn.Linear(4096,num_classes)
#
# Squeezenet
# ~~~~~~~~~~
#
# The Squeezenet architecture is described in the paper `SqueezeNet:
# AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
# size <https://arxiv.org/abs/1602.07360>`__ and uses a different output
# structure than any of the other models shown here. Torchvision has two
# versions of Squeezenet; we use version 1.0. The output comes from a 1x1
# convolutional layer which is the 1st layer of the classifier:
#
# ::
#
# (classifier): Sequential(
# (0): Dropout(p=0.5) | # (2): ReLU(inplace)
# (3): AvgPool2d(kernel_size=13, stride=1, padding=0)
# )
#
# To modify the network, we reinitialize the Conv2d layer to have an
# output feature map of depth 2 as
#
# ::
#
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
#
# Densenet
# ~~~~~~~~
#
# Densenet was introduced in the paper `Densely Connected Convolutional
# Networks <https://arxiv.org/abs/1608.06993>`__. Torchvision has four
# variants of Densenet but here we only use Densenet-121. The output layer
# is a linear layer with 1024 input features:
#
# ::
#
# (classifier): Linear(in_features=1024, out_features=1000, bias=True)
#
# To reshape the network, we reinitialize the classifier’s linear layer as
#
# ::
#
# model.classifier = nn.Linear(1024, num_classes)
#
# Inception v3
# ~~~~~~~~~~~~
#
# Finally, Inception v3 was first described in `Rethinking the Inception
# Architecture for Computer
# Vision <https://arxiv.org/pdf/1512.00567v1.pdf>`__. This network is
# unique because it has two output layers when training. The second output
# is known as an auxiliary output and is contained in the AuxLogits part
# of the network. The primary output is a linear layer at the end of the
# network. Note, when testing we only consider the primary output. The
# auxiliary output and primary output of the loaded model are printed as:
#
# ::
#
# (AuxLogits): InceptionAux(
# ...
# (fc): Linear(in_features=768, out_features=1000, bias=True)
# )
# ...
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
#
# To finetune this model we must reshape both layers. This is accomplished
# with the following
#
# ::
#
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
#
# Notice, many of the models have similar output structures, but each must
# be handled slightly differently. Also, check out the printed model
# architecture of the reshaped network and make sure the number of output
# features is the same as the number of classes in the dataset.
#
def initialize_model(model_name, num_classes, feature_extract = False, add_jpeg_layer = False, train_quant_only = False, train_cnn_only=False, rand_qtable = True, quality = 50, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 448
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 448
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 448
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft,\
True, feature_extract)
# Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
if add_jpeg_layer:
if train_quant_only and not train_cnn_only:
model_ft.load_state_dict(torch.load("model.final"))
# if loadfull:
# model_ft.load_state_dict(torch.load("model.final"))
# model_ft = model_ft[1]
model_ft = nn.Sequential(JpegLayer( \
rand_qtable = rand_qtable, cnn_only = train_cnn_only, quality = quality),\
model_ft)
set_parameter_requires_grad(model_ft,\
False, feature_extract,
train_quant_only, train_cnn_only)
# model_ft.load_state_dict(torch.load("model.fail"))
return model_ft, input_size
# Initialize the model for this run
model_ft, input_size = initialize_model(args.model_name, args.num_classes, feature_extract, args.add_jpeg_layer, args.quant_only, args.cnn_only, args.rand_qtable, args.quality, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
######################################################################
# Load Data
# ---------
#
# Now that we know what the input size must be, we can initialize the data
# transforms, image datasets, and the dataloaders. Notice, the models were
# pretrained with the hard-coded normalization values, as described
# `here <https://pytorch.org/docs/master/torchvision/models.html>`__.
#
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(args.data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
######################################################################
# Create the Optimizer
# --------------------
#
# Now that the model structure is correct, the final step for finetuning
# and feature extracting is to create an optimizer that only updates the
# desired parameters. Recall that after loading the pretrained model, but
# before reshaping, if ``feature_extract=True`` we manually set all of the
# parameter’s ``.requires_grad`` attributes to False. Then the
# reinitialized layer’s parameters have ``.requires_grad=True`` by
# default. So now we know that *all parameters that have
# .requires_grad=True should be optimized.* Next, we make a list of such
# parameters and input this list to the SGD algorithm constructor.
#
# To verify this, check out the printed parameters to learn. When
# finetuning, this list should be long and include all of the model
# parameters. However, when feature extracting this list should be short
# and only include the weights and biases of the reshaped layers.
#
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = [] #model_ft.parameters()
print("Params to learn:")
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print('\t',name)
iftrain = not(args.quant_only and args.cnn_only)
if iftrain:
# Observe that all parameters are being optimized
#if train_quant_only:
#optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
#else:
optimizer_ft = optim.SGD(params_to_update, lr = 0.0005, momentum=0.9)
else:
optimizer_ft = None
# optim.SGD([{'params': params_to_update},\
# {'params': params_quantize, 'lr': 0.005, 'momentum':0.9}], lr=0.0005, momentum=0.9)
######################################################################
# Run Training and Validation Step
# --------------------------------
#
# Finally, the last step is to setup the loss for the model, then run the
# training and validation function for the set number of epochs. Notice,
# depending on the number of epochs this step may take a while on a CPU.
# Also, the default learning rate is not optimal for all of the models, so
# to achieve maximum accuracy it would be necessary to tune for each model
# separately.
#
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=args.num_epochs, is_inception=(args.model_name=="inception"), train = iftrain)
if args.cnn_only == True and iftrain:
torch.save(model_ft.state_dict(), "model.final")
#print the trained quantization matrix
if args.qtable:
print('--------- the trained quantize table ---------')
for name,param in model_ft.named_parameters():
if param.requires_grad == True and\
name == "0.quantize":
print('Y',param.data[0]*255)
print('Cb',param.data[1]*255)
print('Cr',param.data[2]*255)
break
# Let's visualize feature maps after jpeg layer
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
if args.add_jpeg_layer:
activation = {}
model_ft[0].register_forward_hook(get_activation('0.JpegLayer'))
data, _ = image_datasets["val"][0]
f1 = data.cpu().data.numpy()
f1 = (np.transpose(f1,(1,2,0))*255).astype(np.uint8)
data.unsqueeze_(0)
output = model_ft(data.to(device))
f2 = activation['0.JpegLayer'].squeeze().cpu().data.numpy()
f2 = (np.transpose(f2, (1,2,0))*255).astype(np.uint8)
if args.visualize:
fig, axarr = plt.subplots(2)
axarr[0].imshow(f1)
axarr[1].imshow(f2)
plt.show()
#save images
from psnr import psnr, compressJ, save
from PIL import Image
save(f1, "org.bmp")
save(f2, "myJpeg.jpg")
###############################
##### standard python jpeg ####
###############################
#im = compressJ(f1,"toJpeg.jpg")
#im = np.array(im, np.int16).transpose(2,0,1)
#
##############################
##### psnr ####
##############################
#f1 = np.array(f1,np.int16).transpose(2,0,1)
#f2 = np.array(f2,np.int16).transpose(2,0,1)
#print("compression results!")
#print("PSNR - my jpeg: ", psnr(f2[0],f1[0]))
#print("PSNR - PIL jpeg", psnr(im[0], f1[0]))
#print("PSNR - my vs. PIL", psnr(im[0], f2[0]))
#######################################################################
## Comparison with Model Trained from Scratch
## ------------------------------------------
##
## Just for fun, let's see how the model learns if we do not use transfer
## learning. The performance of finetuning vs. feature extracting depends
## largely on the dataset but in general both transfer learning methods
## produce favorable results in terms of training time and overall accuracy
## versus a model trained from scratch.
##
#
#
## Initialize the non-pretrained version of the model used for this run
#scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
#scratch_model = scratch_model.to(device)
#scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
#scratch_criterion = nn.CrossEntropyLoss()
#_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
#
## Plot the training curves of validation accuracy vs. number
## of training epochs for the transfer learning method and
## the model trained from scratch
#ohist = []
#shist = []
#
#ohist = [h.cpu().numpy() for h in hist]
#shist = [h.cpu().numpy() for h in scratch_hist]
#
#plt.title("Validation Accuracy vs. Number of Training Epochs")
#plt.xlabel("Training Epochs")
#plt.ylabel("Validation Accuracy")
#plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
#plt.plot(range(1,num_epochs+1),shist,label="Scratch")
#plt.ylim((0,1.))
#plt.xticks(np.arange(1, num_epochs+1, 1.0))
#plt.legend()
#plt.show()
#
######################################################################
# Final Thoughts and Where to Go Next
# -----------------------------------
#
# Try running some of the other models and see how good the accuracy gets.
# Also, notice that feature extracting takes less time because in the
# backward pass we do not have to calculate most of the gradients. There
# are many places to go from here. You could:
#
# - Run this code with a harder dataset and see some more benefits of
# transfer learning
# - Using the methods described here, use transfer learning to update a
# different model, perhaps in a new domain (i.e. NLP, audio, etc.)
# - Once you are happy with a model, you can export it as an ONNX model,
# or trace it using the hybrid frontend for more speed and optimization
# opportunities.
# | # (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1)) | random_line_split |
Server.go | package server
import (
"bufio"
//"bytes"
"io"
"log"
"net"
"strings"
)
var (
MainServer *Server
//EndPacket []byte = []byte{128, 0, 128, 1}
)
type Job func()
type PacketHandler func(c *Client, rawPacket Packet)
type PacketProcessor func([]byte) Packet
type Server struct {
Socket *net.TCPListener
Clients map[ID]*Client
Jobs chan Job
IDGen *IDGenerator
PacketHandle []PacketHandler
PacketProcess []PacketProcessor
}
func (s *Server) Run() {
for job := range s.Jobs {
job()
}
}
type Client struct {
Socket *net.TCPConn
UDPCon *net.UDPConn
UDPAddr *net.UDPAddr
UDPWriter *bufio.Writer
TCPReader *bufio.Reader
TCPWriter *bufio.Writer
ID ID
Name string
X, Y float32
Rotation float32
Game *Game_t
GameID uint32
Character *CharacterController
Disconnected int32
}
func isTransportOver(data string) (over bool) {
over = strings.HasSuffix(data, "\r\n\r\n")
return
}
func (c *Client) Update() {}
func (c *Client) Run() {
defer c.OnPanic()
log.Println("Income connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for true {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
copySlice := buf[i : i+pLength] // copy value hope this is work :)
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong, e.g. the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
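// Illustrative sketch only (not part of the original server): judging from the
// read loop in Run above, a packet appears to be framed as one packet-ID byte,
// then a little-endian uint16 holding the total length (header included), then
// the payload. framePacket is a hypothetical helper showing that layout.
func framePacket(id byte, payload []byte) []byte {
	total := uint16(3 + len(payload)) // 1 ID byte + 2 length bytes + payload bytes
	buf := make([]byte, 0, total)
	buf = append(buf, id, byte(total&0xFF), byte(total>>8))
	return append(buf, payload...)
}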
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Income connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something wrong, when connection was losted
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) { | )
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: must be reply TCP message with approve connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
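// Inferred from the loop above (not documented elsewhere): an ID_ResolveUDP
// datagram appears to carry the player ID as a little-endian uint32 in bytes
// 3-6, i.e. immediately after the usual 3-byte header (ID byte plus two
// length bytes).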
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP("udp", UDP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (UDP)! at [%s]", UDP_addr)
//MainServer.IDGen does not have to be thread-safe because the only place we use it is when
//we are adding/removing clients from the list, and we need to do that safely anyway
//Socket *net.TCPListener // -- ln, err := net.ListenTCP("tcp", TCP_addr)
//Clients map[ID]*Client // -- make(map[ID]*Client)
//Jobs chan Job // -- make(chan Job, 1000)
//IDGen *IDGenerator // -- NewIDGenerator(100000, false)
MainServer = &Server{ln, make(map[ID]*Client), make(chan Job, 1000), NewIDGenerator(100000, false), make([]PacketHandler, ID_Count), make([]PacketProcessor, ID_Count)}
// [login]
MainServer.PacketProcess[ID_Login] = ProcessPacketLogin
MainServer.PacketHandle[ID_Login] = HandlePacketLogin
// [InstanceGO]
MainServer.PacketProcess[ID_InstanceGO] = ProcessPacketInstanceGO //
MainServer.PacketHandle[ID_InstanceGO] = HandlePacketInstanceGO //
// [JoinGame]
MainServer.PacketProcess[ID_JoinGame] = ProcessPacketJoinGame //
MainServer.PacketHandle[ID_JoinGame] = HandlePacketJoinGame //
// [PlayerInput]
MainServer.PacketProcess[ID_PlayerInput] = ProcessPacketPlayerInput //
MainServer.PacketHandle[ID_PlayerInput] = HandlePacketPlayerInput //
// [RequestGames]
MainServer.PacketProcess[ID_RequestGames] = ProcessPacketRequestGames //
MainServer.PacketHandle[ID_RequestGames] = HandlePacketRequestGames //
// [CreateGame]
MainServer.PacketProcess[ID_CreateGame] = ProcessPacketCreateGame
MainServer.PacketHandle[ID_CreateGame] = HandlePacketCreateGame
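// Illustrative note: a new message type would be wired up the same way, with
// one processor that decodes the raw bytes into a Packet and one handler that
// applies it to a client. (ID_Foo, ProcessPacketFoo and HandlePacketFoo are
// hypothetical names, not part of this codebase.)
//
//	MainServer.PacketProcess[ID_Foo] = ProcessPacketFoo
//	MainServer.PacketHandle[ID_Foo] = HandlePacketFoo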
// [ResolveUDP <- login]
//packet = HandlePacketPlayerInput(data[:lenght])
//OnPacketPlayerInput(c, packet.(PacketPlayerInput))
//packet = HandlePacketRequestGames(data[:lenght])
//OnPacketRequestGames(c, packet.(PacketRequestGames))
//packet = HandlePacketCreateGame(data[:lenght])
//HandlePacketGameCreate(c, packet.(PacketGameCreate))
//packet = HandlePacketJoinGame(data[:lenght])
//OnPacketJoinGame(c, packet.(PacketJoinGame))
//packet = HandlePacketInstanceGO(data[:lenght])
//OnPacketInstanceGO(c, packet.(PacketInstanceGO))
//log.Printf("packet: id=%d len=%d", data[0], lenght) // [60] packet unity3d web player
//var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
//c.TCPWriter.WriteString(str)
//c.TCPWriter.Flush()
go MainServer.Run()
//go MainServer.RunGameLoops()
go AcceptUDP(UDP_Listner)
for {
conn, err := ln.AcceptTCP()
if err != nil {
log.Println(err)
break
}
MainServer.Jobs <- func() {
id := MainServer.IDGen.NextID()
c := &Client{Socket: conn, ID: id}
MainServer.Clients[c.ID] = c
go c.Run()
}
}
} | for {
var (
buf = make([]byte, 1024)
PlayerID ID | random_line_split |
Server.go | package server
import (
"bufio"
//"bytes"
"io"
"log"
"net"
"strings"
)
var (
MainServer *Server
//EndPacket []byte = []byte{128, 0, 128, 1}
)
type Job func()
type PacketHandler func(c *Client, rawPacket Packet)
type PacketProcessor func([]byte) Packet
type Server struct {
Socket *net.TCPListener
Clients map[ID]*Client
Jobs chan Job
IDGen *IDGenerator
PacketHandle []PacketHandler
PacketProcess []PacketProcessor
}
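// Run drains the job queue on a single goroutine, so all server state is
// mutated from one place without additional locking.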
func (s *Server) Run() {
for job := range s.Jobs {
job()
}
}
type Client struct {
Socket *net.TCPConn
UDPCon *net.UDPConn
UDPAddr *net.UDPAddr
UDPWriter *bufio.Writer
TCPReader *bufio.Reader
TCPWriter *bufio.Writer
ID ID
Name string
X, Y float32
Rotation float32
Game *Game_t
GameID uint32
Character *CharacterController
Disconnected int32
}
func isTransportOver(data string) (over bool) {
over = strings.HasSuffix(data, "\r\n\r\n")
return
}
func (c *Client) Update() {}
func (c *Client) Run() {
defer c.OnPanic()
log.Println("Income connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
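// One read may contain several frames; each frame is [id:1][length:2 little-endian][payload],
// with the length counting the whole frame.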
var i uint16 = 0
for true {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
// copy the frame out of buf: the read buffer is reused by the next Read and the
// queued job runs on another goroutine, so it needs its own bytes
copySlice := make([]byte, pLength)
copy(copySlice, buf[i:i+pLength])
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong: the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Income connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something wrong, when connection was losted
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
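// HandlePacket decodes a raw frame with the processor registered for its packet
// ID and passes the result to the matching handler.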
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients |
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP("udp", UDP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (UDP)! at [%s]", UDP_addr)
//MainServer.IDGen does not need its own locking: the only place we use it is
//when adding/removing clients from the list, which already has to be done safely
//Socket *net.TCPListener // -- ln, err := net.ListenTCP("tcp", TCP_addr)
//Clients map[ID]*Client // -- make(map[ID]*Client)
//Jobs chan Job // -- make(chan Job, 1000)
//IDGen *IDGenerator // -- NewIDGenerator(100000, false)
MainServer = &Server{ln, make(map[ID]*Client), make(chan Job, 1000), NewIDGenerator(100000, false), make([]PacketHandler, ID_Count), make([]PacketProcessor, ID_Count)}
// [login]
MainServer.PacketProcess[ID_Login] = ProcessPacketLogin
MainServer.PacketHandle[ID_Login] = HandlePacketLogin
// [InstanceGO]
MainServer.PacketProcess[ID_InstanceGO] = ProcessPacketInstanceGO //
MainServer.PacketHandle[ID_InstanceGO] = HandlePacketInstanceGO //
// [JoinGame]
MainServer.PacketProcess[ID_JoinGame] = ProcessPacketJoinGame //
MainServer.PacketHandle[ID_JoinGame] = HandlePacketJoinGame //
// [PlayerInput]
MainServer.PacketProcess[ID_PlayerInput] = ProcessPacketPlayerInput //
MainServer.PacketHandle[ID_PlayerInput] = HandlePacketPlayerInput //
// [RequestGames]
MainServer.PacketProcess[ID_RequestGames] = ProcessPacketRequestGames //
MainServer.PacketHandle[ID_RequestGames] = HandlePacketRequestGames //
// [CreateGame]
MainServer.PacketProcess[ID_CreateGame] = ProcessPacketCreateGame
MainServer.PacketHandle[ID_CreateGame] = HandlePacketCreateGame
// [ResolveUDP <- login]
//packet = HandlePacketPlayerInput(data[:lenght])
//OnPacketPlayerInput(c, packet.(PacketPlayerInput))
//packet = HandlePacketRequestGames(data[:lenght])
//OnPacketRequestGames(c, packet.(PacketRequestGames))
//packet = HandlePacketCreateGame(data[:lenght])
//HandlePacketGameCreate(c, packet.(PacketGameCreate))
//packet = HandlePacketJoinGame(data[:lenght])
//OnPacketJoinGame(c, packet.(PacketJoinGame))
//packet = HandlePacketInstanceGO(data[:lenght])
//OnPacketInstanceGO(c, packet.(PacketInstanceGO))
//log.Printf("packet: id=%d len=%d", data[0], lenght) // [60] packet unity3d web player
//var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
//c.TCPWriter.WriteString(str)
//c.TCPWriter.Flush()
go MainServer.Run()
//go MainServer.RunGameLoops()
go AcceptUDP(UDP_Listner)
for {
conn, err := ln.AcceptTCP()
if err != nil {
log.Println(err)
break
}
MainServer.Jobs <- func() {
id := MainServer.IDGen.NextID()
c := &Client{Socket: conn, ID: id}
MainServer.Clients[c.ID] = c
go c.Run()
}
}
}
| {
if PlayerID == c.ID { // TODO: should reply with a TCP message approving the connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
} | conditional_block |
Server.go | package server
import (
"bufio"
//"bytes"
"io"
"log"
"net"
"strings"
)
var (
MainServer *Server
//EndPacket []byte = []byte{128, 0, 128, 1}
)
type Job func()
type PacketHandler func(c *Client, rawPacket Packet)
type PacketProcessor func([]byte) Packet
type Server struct {
Socket *net.TCPListener
Clients map[ID]*Client
Jobs chan Job
IDGen *IDGenerator
PacketHandle []PacketHandler
PacketProcess []PacketProcessor
}
func (s *Server) Run() {
for job := range s.Jobs {
job()
}
}
type Client struct {
Socket *net.TCPConn
UDPCon *net.UDPConn
UDPAddr *net.UDPAddr
UDPWriter *bufio.Writer
TCPReader *bufio.Reader
TCPWriter *bufio.Writer
ID ID
Name string
X, Y float32
Rotation float32
Game *Game_t
GameID uint32
Character *CharacterController
Disconnected int32
}
func isTransportOver(data string) (over bool) {
over = strings.HasSuffix(data, "\r\n\r\n")
return
}
func (c *Client) Update() {}
func (c *Client) | () {
defer c.OnPanic()
log.Println("Income connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for true {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
// copy the frame out of buf: the read buffer is reused by the next Read and the
// queued job runs on another goroutine, so it needs its own bytes
copySlice := make([]byte, pLength)
copy(copySlice, buf[i:i+pLength])
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong: the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
}
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Income connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something wrong, when connection was losted
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: should reply with a TCP message approving the connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP("udp", UDP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (UDP)! at [%s]", UDP_addr)
//MainServer.IDGen does not need its own locking: the only place we use it is
//when adding/removing clients from the list, which already has to be done safely
//Socket *net.TCPListener // -- ln, err := net.ListenTCP("tcp", TCP_addr)
//Clients map[ID]*Client // -- make(map[ID]*Client)
//Jobs chan Job // -- make(chan Job, 1000)
//IDGen *IDGenerator // -- NewIDGenerator(100000, false)
MainServer = &Server{ln, make(map[ID]*Client), make(chan Job, 1000), NewIDGenerator(100000, false), make([]PacketHandler, ID_Count), make([]PacketProcessor, ID_Count)}
// [login]
MainServer.PacketProcess[ID_Login] = ProcessPacketLogin
MainServer.PacketHandle[ID_Login] = HandlePacketLogin
// [InstanceGO]
MainServer.PacketProcess[ID_InstanceGO] = ProcessPacketInstanceGO //
MainServer.PacketHandle[ID_InstanceGO] = HandlePacketInstanceGO //
// [JoinGame]
MainServer.PacketProcess[ID_JoinGame] = ProcessPacketJoinGame //
MainServer.PacketHandle[ID_JoinGame] = HandlePacketJoinGame //
// [PlayerInput]
MainServer.PacketProcess[ID_PlayerInput] = ProcessPacketPlayerInput //
MainServer.PacketHandle[ID_PlayerInput] = HandlePacketPlayerInput //
// [RequestGames]
MainServer.PacketProcess[ID_RequestGames] = ProcessPacketRequestGames //
MainServer.PacketHandle[ID_RequestGames] = HandlePacketRequestGames //
// [CreateGame]
MainServer.PacketProcess[ID_CreateGame] = ProcessPacketCreateGame
MainServer.PacketHandle[ID_CreateGame] = HandlePacketCreateGame
// [ResolveUDP <- login]
//packet = HandlePacketPlayerInput(data[:lenght])
//OnPacketPlayerInput(c, packet.(PacketPlayerInput))
//packet = HandlePacketRequestGames(data[:lenght])
//OnPacketRequestGames(c, packet.(PacketRequestGames))
//packet = HandlePacketCreateGame(data[:lenght])
//HandlePacketGameCreate(c, packet.(PacketGameCreate))
//packet = HandlePacketJoinGame(data[:lenght])
//OnPacketJoinGame(c, packet.(PacketJoinGame))
//packet = HandlePacketInstanceGO(data[:lenght])
//OnPacketInstanceGO(c, packet.(PacketInstanceGO))
//log.Printf("packet: id=%d len=%d", data[0], lenght) // [60] packet unity3d web player
//var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
//c.TCPWriter.WriteString(str)
//c.TCPWriter.Flush()
go MainServer.Run()
//go MainServer.RunGameLoops()
go AcceptUDP(UDP_Listner)
for {
conn, err := ln.AcceptTCP()
if err != nil {
log.Println(err)
break
}
MainServer.Jobs <- func() {
id := MainServer.IDGen.NextID()
c := &Client{Socket: conn, ID: id}
MainServer.Clients[c.ID] = c
go c.Run()
}
}
}
| Run | identifier_name |
Server.go | package server
import (
"bufio"
//"bytes"
"io"
"log"
"net"
"strings"
)
var (
MainServer *Server
//EndPacket []byte = []byte{128, 0, 128, 1}
)
type Job func()
type PacketHandler func(c *Client, rawPacket Packet)
type PacketProcessor func([]byte) Packet
type Server struct {
Socket *net.TCPListener
Clients map[ID]*Client
Jobs chan Job
IDGen *IDGenerator
PacketHandle []PacketHandler
PacketProcess []PacketProcessor
}
func (s *Server) Run() {
for job := range s.Jobs {
job()
}
}
type Client struct {
Socket *net.TCPConn
UDPCon *net.UDPConn
UDPAddr *net.UDPAddr
UDPWriter *bufio.Writer
TCPReader *bufio.Reader
TCPWriter *bufio.Writer
ID ID
Name string
X, Y float32
Rotation float32
Game *Game_t
GameID uint32
Character *CharacterController
Disconnected int32
}
func isTransportOver(data string) (over bool) {
over = strings.HasSuffix(data, "\r\n\r\n")
return
}
func (c *Client) Update() {}
func (c *Client) Run() |
//func (c *Client) Run() {
// defer c.OnPanic()
// log.Println("Income connection")
// c.TCPReader = bufio.NewReader(c.Socket)
// c.TCPWriter = bufio.NewWriter(c.Socket)
// //c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
// var (
// buf = make([]byte, 512)
// )
// for {
// //n, err := c.TCPReader.Read(buf)
// //data := string(buf[:n])
// _, err := c.TCPReader.Read(buf) //[0:])
// //n++
// packets := bytes.Split(buf, EndPacket)
// switch err {
// case io.EOF:
// return
// case nil:
// {
// for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
// }
// buf = make([]byte, 512) // clear
// }
// default: // something wrong, when connection was losted
// {
// // (10054 on windows WSAECONNRESET)
// if c.Game != nil {
// if c.Character != nil {
// var id uint32 = c.Character.GameObject().GOID
// for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
// if id == e.Value.(PacketResultInstanceGO).GOID {
// c.Game.Packages.Remove(e)
// break
// }
// }
// DestroyCharacter := PacketCharacterState{id, CS_Destroy}
// c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
// c.Character.GameObject().Destroy()
// c.Game.RemovePlayer(c)
// }
// }
// delete(MainServer.Clients, c.ID)
// log.Printf(err.Error())
// return
// }
// }
// }
//}
func (c *Client) Send(p Packet) {}
func (c *Client) OnPanic() {
if x := recover(); x != nil {
//if atomic.CompareAndSwapInt32(&c.Disconnected, 0, 1) {
// log.Println(c.Name, "Disconnected. Reason:", x)
// MainServer.Jobs <- func() {
// delete(MainServer.Clients, c.ID)
// MainServer.IDGen.PutID(c.ID)
// }
//}
}
}
func (c *Client) HandlePacket(data []byte, lenght uint16) {
defer c.OnPanic()
//if MainServer.PacketProcess[data[0]] != nil {
packet := MainServer.PacketProcess[data[0]](data[:lenght])
// if MainServer.PacketHandle[data[0]] != nil {
MainServer.PacketHandle[data[0]](c, packet)
// }
//}
//switch PacketID(data[0]) {
//case ID_Login:
// {
// packet = ProcessPacketLogin(data[:lenght])
// HandlePacketLogin(c, packet)
// }
//case ID_PlayerInput:
// {
// packet = HandlePacketPlayerInput(data[:lenght])
// OnPacketPlayerInput(c, packet.(PacketPlayerInput))
// }
//case ID_RequestGames:
// {
// packet = HandlePacketRequestGames(data[:lenght])
// OnPacketRequestGames(c, packet.(PacketRequestGames))
// }
//case ID_CreateGame:
// {
// packet = HandlePacketCreateGame(data[:lenght])
// OnPacketGameCreate(c, packet.(PacketGameCreate))
// }
//case ID_JoinGame:
// {
// packet = HandlePacketJoinGame(data[:lenght])
// OnPacketJoinGame(c, packet.(PacketJoinGame))
// }
////case ID_ResolveUDP:
//// {
//// packet = HandlePacketResolveUPD(data[:lenght])
//// OnPacketResolveUPD(c, packet.(PacketResolveUPD))
//// }
//case ID_InstanceGO:
// {
// packet = HandlePacketInstanceGO(data[:lenght])
// OnPacketInstanceGO(c, packet.(PacketInstanceGO))
// }
//case 60:
// {
// log.Printf("packet: id=%d len=%d", data[0], lenght)
// var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
// c.TCPWriter.WriteString(str)
// c.TCPWriter.Flush()
// }
//default:
// {
// log.Printf("Unhandled packet: id=%d len=%d", data[0], lenght)
// }
//}
}
func AcceptUDP(UDP_Listner *net.UDPConn) {
for {
var (
buf = make([]byte, 1024)
PlayerID ID
)
_, addr, err := UDP_Listner.ReadFromUDP(buf[0:])
if err != nil {
log.Printf("AcceptUDP error:" + err.Error())
continue
}
if buf[0] == ID_ResolveUDP {
PlayerID = ID(buf[3]) | (ID(buf[4]) << 8) | (ID(buf[5])<<16 | ID(buf[6])<<24)
for _, c := range MainServer.Clients {
if PlayerID == c.ID { // TODO: should reply with a TCP message approving the connection
log.Printf("%s pid=%d", addr.String(), PlayerID)
c.UDPCon = UDP_Listner
c.UDPAddr = addr
}
}
buf = make([]byte, 1024)
continue
}
}
}
func StartServer() {
TCP_addr, TCP_err := net.ResolveTCPAddr("tcp", "0.0.0.0:4354")
if TCP_err != nil {
log.Println(TCP_err)
return
}
ln, err := net.ListenTCP("tcp", TCP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (TCP)! at [%s]", TCP_addr)
UDP_addr, UDP_err := net.ResolveUDPAddr("udp4", "0.0.0.0:4354")
if UDP_err != nil {
log.Println(UDP_err)
return
}
UDP_Listner, err := net.ListenUDP("udp", UDP_addr)
if err != nil {
log.Println(err)
return
}
log.Printf("Server started (UDP)! at [%s]", UDP_addr)
//MainServer.IDGen does not need its own locking: the only place we use it is
//when adding/removing clients from the list, which already has to be done safely
//Socket *net.TCPListener // -- ln, err := net.ListenTCP("tcp", TCP_addr)
//Clients map[ID]*Client // -- make(map[ID]*Client)
//Jobs chan Job // -- make(chan Job, 1000)
//IDGen *IDGenerator // -- NewIDGenerator(100000, false)
MainServer = &Server{ln, make(map[ID]*Client), make(chan Job, 1000), NewIDGenerator(100000, false), make([]PacketHandler, ID_Count), make([]PacketProcessor, ID_Count)}
// [login]
MainServer.PacketProcess[ID_Login] = ProcessPacketLogin
MainServer.PacketHandle[ID_Login] = HandlePacketLogin
// [InstanceGO]
MainServer.PacketProcess[ID_InstanceGO] = ProcessPacketInstanceGO //
MainServer.PacketHandle[ID_InstanceGO] = HandlePacketInstanceGO //
// [JoinGame]
MainServer.PacketProcess[ID_JoinGame] = ProcessPacketJoinGame //
MainServer.PacketHandle[ID_JoinGame] = HandlePacketJoinGame //
// [PlayerInput]
MainServer.PacketProcess[ID_PlayerInput] = ProcessPacketPlayerInput //
MainServer.PacketHandle[ID_PlayerInput] = HandlePacketPlayerInput //
// [RequestGames]
MainServer.PacketProcess[ID_RequestGames] = ProcessPacketRequestGames //
MainServer.PacketHandle[ID_RequestGames] = HandlePacketRequestGames //
// [CreateGame]
MainServer.PacketProcess[ID_CreateGame] = ProcessPacketCreateGame
MainServer.PacketHandle[ID_CreateGame] = HandlePacketCreateGame
// [ResolveUDP <- login]
//packet = HandlePacketPlayerInput(data[:lenght])
//OnPacketPlayerInput(c, packet.(PacketPlayerInput))
//packet = HandlePacketRequestGames(data[:lenght])
//OnPacketRequestGames(c, packet.(PacketRequestGames))
//packet = HandlePacketCreateGame(data[:lenght])
//HandlePacketGameCreate(c, packet.(PacketGameCreate))
//packet = HandlePacketJoinGame(data[:lenght])
//OnPacketJoinGame(c, packet.(PacketJoinGame))
//packet = HandlePacketInstanceGO(data[:lenght])
//OnPacketInstanceGO(c, packet.(PacketInstanceGO))
//log.Printf("packet: id=%d len=%d", data[0], lenght) // [60] packet unity3d web player
//var str string = "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\"/></cross-domain-policy>"
//c.TCPWriter.WriteString(str)
//c.TCPWriter.Flush()
go MainServer.Run()
//go MainServer.RunGameLoops()
go AcceptUDP(UDP_Listner)
for {
conn, err := ln.AcceptTCP()
if err != nil {
log.Println(err)
break
}
MainServer.Jobs <- func() {
id := MainServer.IDGen.NextID()
c := &Client{Socket: conn, ID: id}
MainServer.Clients[c.ID] = c
go c.Run()
}
}
}
| {
defer c.OnPanic()
log.Println("Income connection")
c.TCPReader = bufio.NewReader(c.Socket)
c.TCPWriter = bufio.NewWriter(c.Socket)
//c.UDPCon = net.ResolveUDPAddr(c.Socket.RemoteAddr().Network(), c.Socket.RemoteAddr().String())
var (
buf = make([]byte, 1024)
)
for {
bufLength, err := c.TCPReader.Read(buf) //[0:])
//log.Printf("buffer length = %d", bufLength)
switch err {
case io.EOF:
return
case nil:
{
var i uint16 = 0
for true {
pLength := uint16(buf[i+1]) | (uint16(buf[i+2]) << 8)
//log.Printf("packet length = %d", pLength)
// copy the frame out of buf: the read buffer is reused by the next Read and the
// queued job runs on another goroutine, so it needs its own bytes
copySlice := make([]byte, pLength)
copy(copySlice, buf[i:i+pLength])
MainServer.Jobs <- func() { c.HandlePacket(copySlice, pLength) }
i += pLength
if i >= (uint16)(bufLength) {
break
}
}
//for _, val := range packets {
// if val[0] != 0 {
// lengh := len(val) // copy value
// copySlice := val // copy value hope this is work :)
// MainServer.Jobs <- func() { c.HandlePacket(copySlice, lengh) }
// }
//}
//buf = make([]byte, 512) // clear
}
default: // something went wrong: the connection was lost
{
// (10054 on windows WSAECONNRESET)
if c.Game != nil {
if c.Character != nil {
var id uint32 = c.Character.GameObject().GOID
for e := c.Game.Packages.Front(); e != nil; e = e.Next() {
if id == e.Value.(PacketResultInstanceGO).GOID {
c.Game.Packages.Remove(e)
break
}
}
DestroyCharacter := PacketCharacterState{id, CS_Destroy}
c.Game.Broadcast2OtherTCP(DestroyCharacter, c.ID, false)
c.Character.GameObject().Destroy()
c.Game.RemovePlayer(c)
}
}
delete(MainServer.Clients, c.ID)
log.Printf(err.Error())
return
}
}
}
} | identifier_body |
plugins.go | package plugins
import (
"os"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"log"
"net/url"
"github.com/PuerkitoBio/goquery"
"../bot"
"github.com/sorcix/irc"
"github.com/turnage/graw/reddit"
)
type AutoJoin struct {
Channels []string
bot *bot.Bot
}
type Misc struct {
bot *bot.Bot
bannedUsers map[string]string
}
type OPCmd struct {
bot *bot.Bot
}
type Login struct {
Username string
Password string
bot *bot.Bot
}
type RedditParser struct {
PreloadCount int
Lurker reddit.Lurker
bot *bot.Bot
close chan bool
}
type RedditSearch struct {
Commands []string
Subreddits []string
RedditListTag string
What []string
Check func(*reddit.Post, *bot.Bot) bool
posts []*reddit.Post
mu sync.Mutex
close chan bool
NSFW bool
}
type ReplyTerms map[string]string
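// RedditSearches maps chat commands to the subreddits they sample and the words
// used when announcing a result; entries marked NSFW get an NSFW tag appended to
// the reply.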
var RedditSearches = []RedditSearch{
RedditSearch{
Commands: []string{"nsfw"},
Subreddits: []string{
"nsfw", "nsfwhardcore", "nsfw2", "HighResNSFW", "BonerMaterial",
"porn", "iWantToFuckHer", "NSFW_nospam", "Sexy", "nude",
"UnrealGirls", "primes", "THEGOLDSTANDARD", "nsfw_hd", "UHDnsfw",
"BeautifulTitsAndAss", "FuckMarryOrKill", "NSFWCute",
"badassgirls", "HotGirls", "PornPleasure", "nsfwnonporn",
"NSFWcringe", "NSFW_PORN_ONLY", "Sex_Games", "BareGirls",
"lusciousladies", "Babes", "FilthyGirls", "NaturalWomen",
"ImgurNSFW", "Adultpics", "sexynsfw", "nsfw_sets", "OnlyGoodPorn",
"TumblrArchives", "HardcoreSex", "PornLovers", "NSFWgaming",
"Fapucational", "RealBeauties", "fappitt", "exotic_oasis", "TIFT",
"nakedbabes", "oculusnsfw", "CrossEyedFap", "TitsAssandNoClass",
"formylover", "Ass_and_Titties", "Ranked_Girls", "fapfactory",
"NSFW_hardcore", "Sexyness", "debs_and_doxies", "nsfwonly",
"pornpedia", "lineups", "Nightlysex", "spod", "nsfwnew",
"pinupstyle", "NoBSNSFW", "nsfwdumps", "FoxyLadies",
"nsfwcloseups", "NudeBeauty", "SimplyNaked", "fappygood",
"FaptasticImages", "WhichOneWouldYouPick", "TumblrPorn",
"SaturdayMorningGirls", "NSFWSector", "GirlsWithBigGuns",
"QualityNsfw", "nsfwPhotoshopBattles", "hawtness",
"fapb4momgetshome", "SeaSquared", "SexyButNotPorn", "WoahPoon",
"Reflections", "Hotness", "Erotic_Galleries", "carnalclass",
"nsfw_bw", "LaBeauteFeminine", "Sweet_Sexuality", "NSFWart",
"WomenOfColorRisque",
},
What: []string{"nsfw"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"ass"},
Subreddits: []string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) Unload() error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, err := os.Open("config/reply.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
if err := decoder.Decode(&replyTerms); err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
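// textReply answers with a fixed line whenever a channel message satisfies check,
// subject to the per-target rate limiter.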
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) |
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
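// kick remembers who had the bot kicked from a channel (unwrapping the actual
// requester when the kick came via X or Chanserv) so join can greet them when
// the bot returns.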
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel)
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))]
cmd = RedditSearch.Commands[0]
return p.bot.Event("cmd." + cmd, source, target, cmd, args)
}
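// checkIsImage reports whether the post URL points at one of the image hosts
// listed in the bot config.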
func checkIsImage(post *reddit.Post, b *bot.Bot) bool {
linkURL, err := url.Parse(post.URL)
if err != nil {
return false
}
for _, host := range b.Config.ImageHosts {
if strings.Contains(linkURL.Host, host) {
return true
}
}
return false
}
func chooseRandStr(opt []string) string {
return opt[rand.Intn(len(opt))]
}
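// get pops the most recently preloaded post, polling for a few seconds if the
// buffer is currently empty.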
func (m *RedditSearch) get() *reddit.Post {
for i := 0; i < 5; i++ {
m.mu.Lock()
var post *reddit.Post
if len(m.posts) > 0 {
post = m.posts[len(m.posts) - 1]
m.posts = m.posts[:len(m.posts) - 1]
}
m.mu.Unlock()
if post != nil {
return post
}
select {
case <-m.close:
return nil
case <-time.After(time.Second):
}
}
return nil
}
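// register starts the background preloader for this search and hooks its
// rate-limited chat commands into the bot.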
func (m *RedditSearch) register(plug *RedditParser) {
m.posts = make([]*reddit.Post, 0, plug.PreloadCount)
m.close = plug.close
go func() {
if len(m.RedditListTag) > 0 {
m.getSubredditList()
}
if len(m.Subreddits) == 0 {
return
}
m.preload(plug.Lurker, plug.bot)
}()
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
what := chooseRandStr(m.What)
post := m.get()
if post == nil {
plug.bot.Message(bot.PrivMsg(target, fmt.Sprintf("%s: haven't indexed any %s yet", source.Name, what)))
return true, nil
}
var msg string
if len(args) > 0 {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s from %s: %s NSFW (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s from %s: %s (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
}
} else {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s: %s NSFW (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s: %s (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
}
}
plug.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
for _, cmd := range m.Commands {
plug.bot.HandleCmdRateLimited("cmd." + cmd, handler)
}
}
func (m *RedditSearch) getSubredditList() {
var url string
if m.NSFW == true {
url = "http://redditlist.com/nsfw/category/" + m.RedditListTag
} else {
url = "http://redditlist.com/sfw/category/" + m.RedditListTag
}
doc, err := goquery.NewDocument(url)
if err != nil {
log.Println("Failed to get reddit list subreddits for ", m.RedditListTag)
return
}
var subs []string
doc.Find(".result-item-slug a").Each(func(i int, s *goquery.Selection) {
sub := strings.TrimPrefix(s.Text(), "/r/")
subs = append(subs, sub)
})
m.Subreddits = append(m.Subreddits, subs...)
}
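// preload tops up the post buffer every two seconds, pulling a random thread
// from a random subreddit and keeping it only if it passes Check.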
func (m *RedditSearch) preload(lurk reddit.Lurker, b *bot.Bot) {
for {
select {
case <-m.close:
return
case <-time.After(2 * time.Second):
m.mu.Lock()
full := len(m.posts) == cap(m.posts)
m.mu.Unlock()
if full {
continue
}
sub := m.Subreddits[rand.Intn(len(m.Subreddits))]
for {
post, err := lurk.Thread("/r/" + sub + "/random")
if err != nil {
log.Printf("Error while getting random post from %s: %v\n", sub, err)
sub = m.Subreddits[rand.Intn(len(m.Subreddits))]
continue
}
if m.Check != nil && !m.Check(post, b) {
continue
}
m.mu.Lock()
m.posts = append(m.posts, post)
m.mu.Unlock()
break
}
}
}
}
| {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
} | identifier_body |
plugins.go | package plugins
import (
"os"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"log"
"net/url"
"github.com/PuerkitoBio/goquery"
"../bot"
"github.com/sorcix/irc"
"github.com/turnage/graw/reddit"
)
type AutoJoin struct {
Channels []string
bot *bot.Bot
}
type Misc struct {
bot *bot.Bot
bannedUsers map[string]string
}
type OPCmd struct {
bot *bot.Bot
}
type Login struct {
Username string
Password string
bot *bot.Bot
}
type RedditParser struct {
PreloadCount int
Lurker reddit.Lurker
bot *bot.Bot
close chan bool
}
type RedditSearch struct {
Commands []string
Subreddits []string
RedditListTag string
What []string
Check func(*reddit.Post, *bot.Bot) bool
posts []*reddit.Post
mu sync.Mutex
close chan bool
NSFW bool
}
type ReplyTerms map[string]string
var RedditSearches = []RedditSearch{
RedditSearch{
Commands: []string{"nsfw"},
Subreddits: []string{
"nsfw", "nsfwhardcore", "nsfw2", "HighResNSFW", "BonerMaterial",
"porn", "iWantToFuckHer", "NSFW_nospam", "Sexy", "nude",
"UnrealGirls", "primes", "THEGOLDSTANDARD", "nsfw_hd", "UHDnsfw",
"BeautifulTitsAndAss", "FuckMarryOrKill", "NSFWCute",
"badassgirls", "HotGirls", "PornPleasure", "nsfwnonporn",
"NSFWcringe", "NSFW_PORN_ONLY", "Sex_Games", "BareGirls",
"lusciousladies", "Babes", "FilthyGirls", "NaturalWomen",
"ImgurNSFW", "Adultpics", "sexynsfw", "nsfw_sets", "OnlyGoodPorn",
"TumblrArchives", "HardcoreSex", "PornLovers", "NSFWgaming",
"Fapucational", "RealBeauties", "fappitt", "exotic_oasis", "TIFT",
"nakedbabes", "oculusnsfw", "CrossEyedFap", "TitsAssandNoClass",
"formylover", "Ass_and_Titties", "Ranked_Girls", "fapfactory",
"NSFW_hardcore", "Sexyness", "debs_and_doxies", "nsfwonly",
"pornpedia", "lineups", "Nightlysex", "spod", "nsfwnew",
"pinupstyle", "NoBSNSFW", "nsfwdumps", "FoxyLadies",
"nsfwcloseups", "NudeBeauty", "SimplyNaked", "fappygood",
"FaptasticImages", "WhichOneWouldYouPick", "TumblrPorn",
"SaturdayMorningGirls", "NSFWSector", "GirlsWithBigGuns",
"QualityNsfw", "nsfwPhotoshopBattles", "hawtness",
"fapb4momgetshome", "SeaSquared", "SexyButNotPorn", "WoahPoon",
"Reflections", "Hotness", "Erotic_Galleries", "carnalclass",
"nsfw_bw", "LaBeauteFeminine", "Sweet_Sexuality", "NSFWart",
"WomenOfColorRisque",
},
What: []string{"nsfw"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"ass"},
Subreddits: []string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) | () error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, err := os.Open("config/reply.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
if err := decoder.Decode(&replyTerms); err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel)
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))]
cmd = RedditSearch.Commands[0]
return p.bot.Event("cmd." + cmd, source, target, cmd, args)
}
func checkIsImage(post *reddit.Post, b *bot.Bot) bool {
linkURL, err := url.Parse(post.URL)
if err != nil {
return false
}
for _, host := range b.Config.ImageHosts {
if strings.Contains(linkURL.Host, host) {
return true
}
}
return false
}
func chooseRandStr(opt []string) string {
return opt[rand.Intn(len(opt))]
}
func (m *RedditSearch) get() *reddit.Post {
for i := 0; i < 5; i++ {
m.mu.Lock()
var post *reddit.Post
if len(m.posts) > 0 {
post = m.posts[len(m.posts) - 1]
m.posts = m.posts[:len(m.posts) - 1]
}
m.mu.Unlock()
if post != nil {
return post
}
select {
case <-m.close:
return nil
case <-time.After(time.Second):
}
}
return nil
}
func (m *RedditSearch) register(plug *RedditParser) {
m.posts = make([]*reddit.Post, 0, plug.PreloadCount)
m.close = plug.close
go func() {
if len(m.RedditListTag) > 0 {
m.getSubredditList()
}
if len(m.Subreddits) == 0 {
return
}
m.preload(plug.Lurker, plug.bot)
}()
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
what := chooseRandStr(m.What)
post := m.get()
if post == nil {
plug.bot.Message(bot.PrivMsg(target, fmt.Sprintf("%s: haven't indexed any %s yet", source.Name, what)))
return true, nil
}
var msg string
if len(args) > 0 {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s from %s: %s NSFW (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s from %s: %s (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
}
} else {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s: %s NSFW (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s: %s (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
}
}
plug.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
for _, cmd := range m.Commands {
plug.bot.HandleCmdRateLimited("cmd." + cmd, handler)
}
}
func (m *RedditSearch) getSubredditList() {
var url string
if m.NSFW == true {
url = "http://redditlist.com/nsfw/category/" + m.RedditListTag
} else {
url = "http://redditlist.com/sfw/category/" + m.RedditListTag
}
doc, err := goquery.NewDocument(url)
if err != nil {
log.Println("Failed to get reddit list subreddits for ", m.RedditListTag)
return
}
var subs []string
doc.Find(".result-item-slug a").Each(func(i int, s *goquery.Selection) {
sub := strings.TrimPrefix(s.Text(), "/r/")
subs = append(subs, sub)
})
m.Subreddits = append(m.Subreddits, subs...)
}
func (m *RedditSearch) preload(lurk reddit.Lurker, b *bot.Bot) {
for {
select {
case <-m.close:
return
case <-time.After(2 * time.Second):
m.mu.Lock()
full := len(m.posts) == cap(m.posts)
m.mu.Unlock()
if full {
continue
}
sub := m.Subreddits[rand.Intn(len(m.Subreddits))]
for {
post, err := lurk.Thread("/r/" + sub + "/random")
if err != nil {
log.Printf("Error while getting random post from %s: %v\n", sub, err)
sub = m.Subreddits[rand.Intn(len(m.Subreddits))]
continue
}
if m.Check != nil && !m.Check(post, b) {
continue
}
m.mu.Lock()
m.posts = append(m.posts, post)
m.mu.Unlock()
break
}
}
}
}
| Unload | identifier_name |
plugins.go | package plugins
import (
"os"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"log"
"net/url"
"github.com/PuerkitoBio/goquery"
"../bot"
"github.com/sorcix/irc"
"github.com/turnage/graw/reddit"
)
type AutoJoin struct {
Channels []string
bot *bot.Bot
}
type Misc struct {
bot *bot.Bot
bannedUsers map[string]string
}
type OPCmd struct {
bot *bot.Bot
}
type Login struct {
Username string
Password string
bot *bot.Bot
}
type RedditParser struct {
PreloadCount int
Lurker reddit.Lurker
bot *bot.Bot
close chan bool
}
type RedditSearch struct {
Commands []string
Subreddits []string
RedditListTag string
What []string
Check func(*reddit.Post, *bot.Bot) bool
posts []*reddit.Post
mu sync.Mutex
close chan bool
NSFW bool
}
type ReplyTerms map[string]string
var RedditSearches = []RedditSearch{
RedditSearch{
Commands: []string{"nsfw"},
Subreddits: []string{
"nsfw", "nsfwhardcore", "nsfw2", "HighResNSFW", "BonerMaterial",
"porn", "iWantToFuckHer", "NSFW_nospam", "Sexy", "nude",
"UnrealGirls", "primes", "THEGOLDSTANDARD", "nsfw_hd", "UHDnsfw",
"BeautifulTitsAndAss", "FuckMarryOrKill", "NSFWCute",
"badassgirls", "HotGirls", "PornPleasure", "nsfwnonporn",
"NSFWcringe", "NSFW_PORN_ONLY", "Sex_Games", "BareGirls",
"lusciousladies", "Babes", "FilthyGirls", "NaturalWomen",
"ImgurNSFW", "Adultpics", "sexynsfw", "nsfw_sets", "OnlyGoodPorn",
"TumblrArchives", "HardcoreSex", "PornLovers", "NSFWgaming",
"Fapucational", "RealBeauties", "fappitt", "exotic_oasis", "TIFT",
"nakedbabes", "oculusnsfw", "CrossEyedFap", "TitsAssandNoClass",
"formylover", "Ass_and_Titties", "Ranked_Girls", "fapfactory",
"NSFW_hardcore", "Sexyness", "debs_and_doxies", "nsfwonly",
"pornpedia", "lineups", "Nightlysex", "spod", "nsfwnew",
"pinupstyle", "NoBSNSFW", "nsfwdumps", "FoxyLadies",
"nsfwcloseups", "NudeBeauty", "SimplyNaked", "fappygood",
"FaptasticImages", "WhichOneWouldYouPick", "TumblrPorn",
"SaturdayMorningGirls", "NSFWSector", "GirlsWithBigGuns",
"QualityNsfw", "nsfwPhotoshopBattles", "hawtness",
"fapb4momgetshome", "SeaSquared", "SexyButNotPorn", "WoahPoon",
"Reflections", "Hotness", "Erotic_Galleries", "carnalclass",
"nsfw_bw", "LaBeauteFeminine", "Sweet_Sexuality", "NSFWart",
"WomenOfColorRisque",
},
What: []string{"nsfw"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"ass"},
Subreddits: []string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
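// Load wires the AutoJoin plugin to the end-of-MOTD (376) event so the configured channels are joined once the server is ready.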
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
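// welcome runs on the 376 (end of MOTD) event and joins every configured channel.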
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) Unload() error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, err := os.Open("config/reply.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
err = decoder.Decode(&replyTerms)
if err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
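// textCmd registers a rate-limited command that replies with a randomly chosen canned response, addressed to the first argument when one is given.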
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
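// textReply replies with a fixed message whenever an incoming PRIVMSG satisfies check, honoring the per-target rate limiter.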
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
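// buzz pesters the nick given as the first argument with a short burst of random messages; restricted to users with the "annoy" permission.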
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
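// invite joins the channel named in an INVITE message, provided the inviting user has the "invite" permission.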
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
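// kick remembers who had the bot kicked from a channel (unwrapping kicks delivered via X or Chanserv) so join can greet them after rejoining.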
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
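// join greets the user recorded by kick once the bot rejoins a channel it was kicked from.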
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel)
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
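// kickban asks the network's channel service (X on Undernet, Chanserv elsewhere) to ban the given nick from the channel; requires the "opcmds" permission.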
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
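// welcome authenticates with services after the 001 welcome: an X login on Undernet, a NickServ identify elsewhere.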
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
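// roulette handles cmd.porn by picking a random configured search and re-dispatching to its first command's handler.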
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) { | cmd = RedditSearch.Commands[0]
return p.bot.Event("cmd." + cmd, source, target, cmd, args)
}
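// checkIsImage reports whether the post links to one of the image hosts configured for the bot.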
func checkIsImage(post *reddit.Post, b *bot.Bot) bool {
linkURL, err := url.Parse(post.URL)
if err != nil {
return false
}
for _, host := range b.Config.ImageHosts {
if strings.Contains(linkURL.Host, host) {
return true
}
}
return false
}
func chooseRandStr(opt []string) string {
return opt[rand.Intn(len(opt))]
}
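// get pops the most recently preloaded post, polling for up to five seconds while the background fetcher catches up; it returns nil if none arrives or the plugin is closing.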
func (m *RedditSearch) get() *reddit.Post {
for i := 0; i < 5; i++ {
m.mu.Lock()
var post *reddit.Post
if len(m.posts) > 0 {
post = m.posts[len(m.posts) - 1]
m.posts = m.posts[:len(m.posts) - 1]
}
m.mu.Unlock()
if post != nil {
return post
}
select {
case <-m.close:
return nil
case <-time.After(time.Second):
}
}
return nil
}
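// register initializes the search's post queue, starts the background preloader, and installs a rate-limited handler for each of its commands.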
func (m *RedditSearch) register(plug *RedditParser) {
m.posts = make([]*reddit.Post, 0, plug.PreloadCount)
m.close = plug.close
go func() {
if len(m.RedditListTag) > 0 {
m.getSubredditList()
}
if len(m.Subreddits) == 0 {
return
}
m.preload(plug.Lurker, plug.bot)
}()
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
what := chooseRandStr(m.What)
post := m.get()
if post == nil {
plug.bot.Message(bot.PrivMsg(target, fmt.Sprintf("%s: haven't indexed any %s yet", source.Name, what)))
return true, nil
}
var msg string
if len(args) > 0 {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s from %s: %s NSFW (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s from %s: %s (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
}
} else {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s: %s NSFW (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s: %s (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
}
}
plug.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
for _, cmd := range m.Commands {
plug.bot.HandleCmdRateLimited("cmd." + cmd, handler)
}
}
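// getSubredditList scrapes redditlist.com for the subreddits tagged with RedditListTag and appends them to Subreddits.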
func (m *RedditSearch) getSubredditList() {
var url string
if m.NSFW == true {
url = "http://redditlist.com/nsfw/category/" + m.RedditListTag
} else {
url = "http://redditlist.com/sfw/category/" + m.RedditListTag
}
doc, err := goquery.NewDocument(url)
if err != nil {
log.Println("Failed to get reddit list subreddits for ", m.RedditListTag)
return
}
var subs []string
doc.Find(".result-item-slug a").Each(func(i int, s *goquery.Selection) {
sub := strings.TrimPrefix(s.Text(), "/r/")
subs = append(subs, sub)
})
m.Subreddits = append(m.Subreddits, subs...)
}
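// preload keeps the post queue full by fetching a random post from a random subreddit every couple of seconds until close is signalled.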
func (m *RedditSearch) preload(lurk reddit.Lurker, b *bot.Bot) {
for {
select {
case <-m.close:
return
case <-time.After(2 * time.Second):
m.mu.Lock()
full := len(m.posts) == cap(m.posts)
m.mu.Unlock()
if full {
continue
}
sub := m.Subreddits[rand.Intn(len(m.Subreddits))]
for {
post, err := lurk.Thread("/r/" + sub + "/random")
if err != nil {
log.Printf("Error while getting random post from %s: %v\n", sub, err)
sub = m.Subreddits[rand.Intn(len(m.Subreddits))]
continue
}
if m.Check != nil && !m.Check(post, b) {
continue
}
m.mu.Lock()
m.posts = append(m.posts, post)
m.mu.Unlock()
break
}
}
}
} | RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))] | random_line_split |
plugins.go | package plugins
import (
"os"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"log"
"net/url"
"github.com/PuerkitoBio/goquery"
"../bot"
"github.com/sorcix/irc"
"github.com/turnage/graw/reddit"
)
type AutoJoin struct {
Channels []string
bot *bot.Bot
}
type Misc struct {
bot *bot.Bot
bannedUsers map[string]string
}
type OPCmd struct {
bot *bot.Bot
}
type Login struct {
Username string
Password string
bot *bot.Bot
}
type RedditParser struct {
PreloadCount int
Lurker reddit.Lurker
bot *bot.Bot
close chan bool
}
type RedditSearch struct {
Commands []string
Subreddits []string
RedditListTag string
What []string
Check func(*reddit.Post, *bot.Bot) bool
posts []*reddit.Post
mu sync.Mutex
close chan bool
NSFW bool
}
type ReplyTerms map[string]string
var RedditSearches = []RedditSearch{
RedditSearch{
Commands: []string{"nsfw"},
Subreddits: []string{
"nsfw", "nsfwhardcore", "nsfw2", "HighResNSFW", "BonerMaterial",
"porn", "iWantToFuckHer", "NSFW_nospam", "Sexy", "nude",
"UnrealGirls", "primes", "THEGOLDSTANDARD", "nsfw_hd", "UHDnsfw",
"BeautifulTitsAndAss", "FuckMarryOrKill", "NSFWCute",
"badassgirls", "HotGirls", "PornPleasure", "nsfwnonporn",
"NSFWcringe", "NSFW_PORN_ONLY", "Sex_Games", "BareGirls",
"lusciousladies", "Babes", "FilthyGirls", "NaturalWomen",
"ImgurNSFW", "Adultpics", "sexynsfw", "nsfw_sets", "OnlyGoodPorn",
"TumblrArchives", "HardcoreSex", "PornLovers", "NSFWgaming",
"Fapucational", "RealBeauties", "fappitt", "exotic_oasis", "TIFT",
"nakedbabes", "oculusnsfw", "CrossEyedFap", "TitsAssandNoClass",
"formylover", "Ass_and_Titties", "Ranked_Girls", "fapfactory",
"NSFW_hardcore", "Sexyness", "debs_and_doxies", "nsfwonly",
"pornpedia", "lineups", "Nightlysex", "spod", "nsfwnew",
"pinupstyle", "NoBSNSFW", "nsfwdumps", "FoxyLadies",
"nsfwcloseups", "NudeBeauty", "SimplyNaked", "fappygood",
"FaptasticImages", "WhichOneWouldYouPick", "TumblrPorn",
"SaturdayMorningGirls", "NSFWSector", "GirlsWithBigGuns",
"QualityNsfw", "nsfwPhotoshopBattles", "hawtness",
"fapb4momgetshome", "SeaSquared", "SexyButNotPorn", "WoahPoon",
"Reflections", "Hotness", "Erotic_Galleries", "carnalclass",
"nsfw_bw", "LaBeauteFeminine", "Sweet_Sexuality", "NSFWart",
"WomenOfColorRisque",
},
What: []string{"nsfw"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"ass"},
Subreddits: []string{
"AssOnTheGlass", "BoltedOnBooty", "BubbleButts",
"ButtsAndBareFeet", "Cheeking", "HighResASS",
"LoveToWatchYouLeave", "NoTorso", "SpreadEm", "TheUnderbun",
"Top_Tier_Asses", "Tushy", "Underbun", "ass", "assgifs",
"bigasses", "booty", "booty_gifs", "datass", "datbuttfromthefront",
"hugeass", "juicybooty", "pawg", "twerking", "whooties",
},
What: []string{"ass"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"boobs"},
Subreddits: []string{
"BeforeAndAfterBoltons", "Bigtitssmalltits", "BoltedOnMaxed",
"Boobies", "BreastEnvy", "EpicCleavage", "HardBoltOns",
"JustOneBoob", "OneInOneOut", "PM_ME_YOUR_TITS_GIRL",
"PerfectTits", "Perky", "Rush_Boobs", "Saggy", "SloMoBoobs",
"TheHangingBoobs", "TheUnderboob", "Titsgalore", "TittyDrop",
"bananatits", "boltedontits", "boobbounce", "boobgifs",
"boobkarma", "boobland", "boobs", "breastplay", "breasts",
"cleavage", "feelthemup", "handbra", "hanging", "hersheyskisstits",
"homegrowntits", "knockers", "naturaltitties", "sideboob",
"tits", "titsagainstglass", "torpedotits", "underboob",
},
What: []string{"boobs", "tits", "titties"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"trap"},
Subreddits: []string{
"Ladyboys", "asianladyboy", "transgif", "dickgirls", "futanari",
},
What: []string{"trap"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"milf"},
Subreddits: []string{
"milf",
},
What: []string{"milf"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"redhead"},
Subreddits: []string{
"redheads", "ginger", "redhead",
},
What: []string{"redhead", "ginger"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"cat"},
Subreddits: []string{
"cat", "cats", "CatGifs", "KittenGifs", "Kittens", "CatPics",
"Kitties", "Kitty", "CatPictures", "LookAtMyCat", "CatReddit",
"CatSpotting", "Kitten", "DelightfullyChubby",
},
What: []string{"cat", "kitten", "kitty"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"dog"},
Subreddits: []string{
"dog", "dogs", "lookatmydog", "DogPictures", "dogswearinghats",
"dogswatchingyoueat",
},
What: []string{"dog", "puppy", "puppeh"},
Check: checkIsImage,
NSFW: false,
},
RedditSearch{
Commands: []string{"blonde"},
Subreddits: []string{
"blonde", "blondes",
},
What: []string{"blonde"},
Check: checkIsImage,
NSFW: true,
},
RedditSearch{
Commands: []string{"brunette"},
Subreddits: []string{
"brunette", "brunetteass",
},
What: []string{"brunette"},
Check: checkIsImage,
NSFW: true,
},
}
func (p *AutoJoin) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.376", p.welcome)
return &bot.PluginInfo{
Name: "AutoJoin",
Description: "Auto joins channels upon connect.",
}, nil
}
func (p *AutoJoin) welcome(name string, params []interface{}) (bool, error) {
for _, channel := range p.Channels {
err := p.bot.Message(bot.Join(channel))
if err != nil {
return false, err
}
}
return false, nil
}
func (p *AutoJoin) Unload() error {
return nil
}
func (p *Misc) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.textCmd("cmd.hey", []string{"how are you?", "heya!", "hello"})
p.bot.HandleCmdRateLimited("cmd.buzz", p.buzz)
file, err := os.Open("config/reply.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
decoder := json.NewDecoder(file)
replyTerms := ReplyTerms{}
err = decoder.Decode(&replyTerms)
if err != nil {
log.Fatal(err)
}
for key, value := range replyTerms {
value := value
key := key
p.textReply("irc.privmsg", value, func(line string) bool {
line = strings.ToLower(line)
return strings.HasSuffix(line, key)
})
}
p.bot.HandleIRC("irc.invite", p.invite)
p.bot.HandleIRC("irc.kick", p.kick)
p.bot.HandleIRC("irc.join", p.join)
p.bot.HandleCmdRateLimited("cmd.bs", p.bullshit)
p.bannedUsers = make(map[string]string)
return &bot.PluginInfo{
Name: "Misc",
Description: "Miscellaneous commands.",
}, nil
}
func (p *Misc) Unload() error {
return nil
}
func (p *Misc) textCmd(cmd string, texts []string) {
if len(texts) == 0 {
return
}
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
text := texts[rand.Intn(len(texts))]
if len(args) > 0 {
text = args[0] + ": " + text
}
p.bot.Message(bot.PrivMsg(target, text))
return true, nil
}
p.bot.HandleCmdRateLimited(cmd, handler)
}
func (p *Misc) textReply(cmd, text string, check func(string) bool) {
handler := func(msg *irc.Message) (bool, error) {
if !check(msg.Trailing) {
return false, nil
}
if p.bot.RateLimiter.Limited(msg.Params[0]) {
return false, nil
}
p.bot.Message(bot.PrivMsg(msg.Params[0], text))
return false, nil
}
p.bot.HandleIRC(cmd, handler)
}
func (p *Misc) bullshit(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
msg := fmt.Sprintf("%s: I am bullshitting you!", args[0])
p.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
func (p *Misc) buzz(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) == 0 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("annoy") {
return true, nil
}
lines := []string{
"%s",
"%s!",
"paging %s!",
"BUZZING %s",
"%s %[1]s %[1]s %[1]s %[1]s",
"hey %s!",
"%s %[1]s %[1]s %[1]s",
"%s come on",
}
times := rand.Intn(3) + 3
for i := 0; i < times; i++ {
line := lines[rand.Intn(len(lines))]
msg := fmt.Sprintf(line, args[0])
p.bot.Message(bot.PrivMsg(target, msg))
time.Sleep(time.Duration(rand.Intn(300) + 300) * time.Millisecond)
}
return true, nil
}
func (p *Misc) invite(msg *irc.Message) (bool, error) {
perms, err := p.bot.Auth(msg.Prefix)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("invite") {
return true, nil
}
//channel := msg.Trailing
channel := msg.Params[1]
err = p.bot.Message(bot.Join(channel))
return true, err
}
func (p *Misc) kick(msg *irc.Message) (bool, error) {
channel, who := msg.Params[0], msg.Params[1]
if who != p.bot.Config.Nickname {
return false, nil
}
bannedUser := msg.Prefix.Name
if bannedUser == "X" || bannedUser == "Chanserv" {
parts := strings.Fields(msg.Trailing)
bannedUser = strings.Trim(parts[len(parts) - 1], "()")
}
p.bannedUsers[channel] = bannedUser
return false, nil
}
func (p *Misc) join(msg *irc.Message) (bool, error) {
if msg.Prefix.Name != p.bot.Config.Nickname {
return false, nil
}
channel := msg.Trailing
bannedUser, ok := p.bannedUsers[channel]
if !ok {
return false, nil
}
delete(p.bannedUsers, channel)
welcome := fmt.Sprintf("%s: _)_", bannedUser)
p.bot.Message(bot.PrivMsg(channel, welcome))
return false, nil
}
func (p *OPCmd) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.HandleCmd("cmd.kb", p.kickban)
return &bot.PluginInfo{
Name: "OPCmd",
Description: "OP Commands.",
}, nil
}
func (p *OPCmd) Unload() error {
return nil
}
func (p *OPCmd) kickban(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
if len(args) != 1 {
return true, nil
}
perms, err := p.bot.Auth(source)
if err != nil {
return false, err
}
if perms == nil || !perms.Can("opcmds") {
return true, nil
}
whom := args[0]
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", fmt.Sprintf("ban %s %s", target, whom)))
} else {
p.bot.Message(bot.PrivMsg("Chanserv", fmt.Sprintf("ban %s %s", target, whom)))
}
return true, nil
}
func (p *Login) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
b.Handle("irc.001", p.welcome)
return &bot.PluginInfo{
Name: "Login",
Description: "Authenticate to the IRC server.",
}, nil
}
func (p *Login) Unload() error {
return nil
}
func (p *Login) welcome(name string, params []interface{}) (bool, error) {
if len(p.Password) > 0 {
if p.bot.Config.ServerType == "undernet" {
p.bot.Message(bot.PrivMsg("[email protected]", "login " + p.Username + " " + p.Password))
} else {
p.bot.Message(bot.PrivMsg("Nickserv", "identify " + p.Password))
}
}
return false, nil
}
func (p *RedditParser) Load(b *bot.Bot) (*bot.PluginInfo, error) {
p.bot = b
p.close = make(chan bool)
if p.PreloadCount < 1 {
p.PreloadCount = 10
}
for i := range RedditSearches {
RedditSearches[i].register(p)
}
p.bot.HandleCmdRateLimited("cmd.porn", p.roulette)
return &bot.PluginInfo{
Name: "RedditParser",
Description: "Parse Reddit for useful images.",
}, nil
}
func (p *RedditParser) Unload() error {
close(p.close)
return nil
}
func (p *RedditParser) roulette(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
RedditSearch := RedditSearches[rand.Intn(len(RedditSearches))]
cmd = RedditSearch.Commands[0]
return p.bot.Event("cmd." + cmd, source, target, cmd, args)
}
func checkIsImage(post *reddit.Post, b *bot.Bot) bool {
linkURL, err := url.Parse(post.URL)
if err != nil {
return false
}
for _, host := range b.Config.ImageHosts {
if strings.Contains(linkURL.Host, host) {
return true
}
}
return false
}
func chooseRandStr(opt []string) string {
return opt[rand.Intn(len(opt))]
}
func (m *RedditSearch) get() *reddit.Post {
for i := 0; i < 5; i++ {
m.mu.Lock()
var post *reddit.Post
if len(m.posts) > 0 {
post = m.posts[len(m.posts) - 1]
m.posts = m.posts[:len(m.posts) - 1]
}
m.mu.Unlock()
if post != nil {
return post
}
select {
case <-m.close:
return nil
case <-time.After(time.Second):
}
}
return nil
}
func (m *RedditSearch) register(plug *RedditParser) {
m.posts = make([]*reddit.Post, 0, plug.PreloadCount)
m.close = plug.close
go func() {
if len(m.RedditListTag) > 0 {
m.getSubredditList()
}
if len(m.Subreddits) == 0 {
return
}
m.preload(plug.Lurker, plug.bot)
}()
handler := func(source *irc.Prefix, target string, cmd string, args []string) (bool, error) {
what := chooseRandStr(m.What)
post := m.get()
if post == nil |
var msg string
if len(args) > 0 {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s from %s: %s NSFW (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s from %s: %s (https://redd.it/%s)", args[0], what, source.Name, post.URL, post.ID)
}
} else {
if m.NSFW == true {
msg = fmt.Sprintf("%s, here is some %s: %s NSFW (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
} else {
msg = fmt.Sprintf("%s, here is some %s: %s (https://redd.it/%s)", source.Name, what, post.URL, post.ID)
}
}
plug.bot.Message(bot.PrivMsg(target, msg))
return true, nil
}
for _, cmd := range m.Commands {
plug.bot.HandleCmdRateLimited("cmd." + cmd, handler)
}
}
func (m *RedditSearch) getSubredditList() {
var url string
if m.NSFW == true {
url = "http://redditlist.com/nsfw/category/" + m.RedditListTag
} else {
url = "http://redditlist.com/sfw/category/" + m.RedditListTag
}
doc, err := goquery.NewDocument(url)
if err != nil {
log.Println("Failed to get reddit list subreddits for ", m.RedditListTag)
return
}
var subs []string
doc.Find(".result-item-slug a").Each(func(i int, s *goquery.Selection) {
sub := strings.TrimPrefix(s.Text(), "/r/")
subs = append(subs, sub)
})
m.Subreddits = append(m.Subreddits, subs...)
}
func (m *RedditSearch) preload(lurk reddit.Lurker, b *bot.Bot) {
for {
select {
case <-m.close:
return
case <-time.After(2 * time.Second):
m.mu.Lock()
full := len(m.posts) == cap(m.posts)
m.mu.Unlock()
if full {
continue
}
sub := m.Subreddits[rand.Intn(len(m.Subreddits))]
for {
post, err := lurk.Thread("/r/" + sub + "/random")
if err != nil {
log.Printf("Error while getting random post from %s: %v\n", sub, err)
sub = m.Subreddits[rand.Intn(len(m.Subreddits))]
continue
}
if m.Check != nil && !m.Check(post, b) {
continue
}
m.mu.Lock()
m.posts = append(m.posts, post)
m.mu.Unlock()
break
}
}
}
}
| {
plug.bot.Message(bot.PrivMsg(target, fmt.Sprintf("%s: haven't indexed any %s yet", source.Name, what)))
return true, nil
} | conditional_block |
partitions.go | // Copyright 2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The storage stage is responsible for partitioning disks, creating RAID
// arrays, formatting partitions, writing files, writing systemd units, and
// writing network units.
package disks
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
cutil "github.com/coreos/ignition/v2/config/util"
"github.com/coreos/ignition/v2/config/v3_5_experimental/types"
"github.com/coreos/ignition/v2/internal/exec/util"
"github.com/coreos/ignition/v2/internal/sgdisk"
)
var (
ErrBadSgdiskOutput = errors.New("sgdisk had unexpected output")
)
// createPartitions creates the partitions described in config.Storage.Disks.
func (s stage) createPartitions(config types.Config) error {
if len(config.Storage.Disks) == 0 {
return nil
}
s.Logger.PushPrefix("createPartitions")
defer s.Logger.PopPrefix()
devs := []string{}
for _, disk := range config.Storage.Disks {
devs = append(devs, string(disk.Device))
}
if err := s.waitOnDevicesAndCreateAliases(devs, "disks"); err != nil {
return err
}
for _, dev := range config.Storage.Disks {
devAlias := util.DeviceAlias(string(dev.Device))
err := s.Logger.LogOp(func() error {
return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns whether the existing partition should be resized, i.e. whether the
// `resize` field is true and the partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
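// convertMiBToSectors converts a size in MiB to a count of sectors of the given logical sector size; nil is passed through unchanged.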
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns the partitions with their real start sector and size filled in.
// It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
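// sgdiskOutput holds the start sector and size of a single partition as parsed from sgdisk --pretend output.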
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func | (sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of a info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether the config indicates that the partition should exist.
// nil (unspecified in json) is treated the same as true.
func partitionShouldExist(part sgdisk.Partition) bool {
return !cutil.IsFalse(part.ShouldExist)
}
// getPartitionMap returns a map of partitions on device, indexed by partition number
func (s stage) getPartitionMap(device string) (util.DiskInfo, error) {
info := util.DiskInfo{}
err := s.Logger.LogOp(
func() error {
var err error
info, err = util.DumpDisk(device)
return err
}, "reading partition table of %q", device)
if err != nil {
return util.DiskInfo{}, err
}
return info, nil
}
// Allow sorting partitions (must be a stable sort) so partition number 0 happens last
// regardless of where it was in the list.
type PartitionList []types.Partition
func (p PartitionList) Len() int {
return len(p)
}
// We only care about partitions with number 0 being considered the "largest" elements
// so they are processed last.
func (p PartitionList) Less(i, j int) bool {
return p[i].Number != 0 && p[j].Number == 0
}
func (p PartitionList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// partitionDisk partitions devAlias according to the spec given by dev
func (s stage) partitionDisk(dev types.Disk, devAlias string) error {
if cutil.IsTrue(dev.WipeTable) {
op := sgdisk.Begin(s.Logger, devAlias)
s.Logger.Info("wiping partition table requested on %q", devAlias)
op.WipeTable(true)
if err := op.Commit(); err != nil {
return err
}
}
// Ensure all partitions with number 0 are last
sort.Stable(PartitionList(dev.Partitions))
op := sgdisk.Begin(s.Logger, devAlias)
diskInfo, err := s.getPartitionMap(devAlias)
if err != nil {
return err
}
// get a list of partitions that have size and start 0 replaced with the real sizes
// that would be used if all specified partitions were to be created anew.
// Also calculate sectors for all of the start/size values.
resolvedPartitions, err := s.getRealStartAndSize(dev, devAlias, diskInfo)
if err != nil {
return err
}
for _, part := range resolvedPartitions {
shouldExist := partitionShouldExist(part)
info, exists := diskInfo.GetPartition(part.Number)
var matchErr error
if exists {
matchErr = partitionMatches(info, part)
}
matches := exists && matchErr == nil
wipeEntry := cutil.IsTrue(part.WipePartitionEntry)
// This is a translation of the matrix in the operator notes.
switch {
case !exists && !shouldExist:
s.Logger.Info("partition %d specified as nonexistant and no partition was found. Success.", part.Number)
case !exists && shouldExist:
op.CreatePartition(part)
case exists && !shouldExist && !wipeEntry:
return fmt.Errorf("partition %d exists but is specified as nonexistant and wipePartitionEntry is false", part.Number)
case exists && !shouldExist && wipeEntry:
op.DeletePartition(part.Number)
case exists && shouldExist && matches:
s.Logger.Info("partition %d found with correct specifications", part.Number)
case exists && shouldExist && !wipeEntry && !matches:
if partitionMatchesResize(info, part) {
s.Logger.Info("resizing partition %d", part.Number)
op.DeletePartition(part.Number)
part.Number = info.Number
part.GUID = &info.GUID
part.TypeGUID = &info.TypeGUID
part.Label = &info.Label
part.StartSector = &info.StartSector
op.CreatePartition(part)
} else {
return fmt.Errorf("Partition %d didn't match: %v", part.Number, matchErr)
}
case exists && shouldExist && wipeEntry && !matches:
s.Logger.Info("partition %d did not meet specifications, wiping partition entry and recreating", part.Number)
op.DeletePartition(part.Number)
op.CreatePartition(part)
default:
// unfortunately, golang doesn't check that all cases are handled exhaustively
return fmt.Errorf("Unreachable code reached when processing partition %d. golang--", part.Number)
}
}
if err := op.Commit(); err != nil {
return fmt.Errorf("commit failure: %v", err)
}
return nil
}
| parseSgdiskPretend | identifier_name |
partitions.go | // Copyright 2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The storage stage is responsible for partitioning disks, creating RAID
// arrays, formatting partitions, writing files, writing systemd units, and
// writing network units.
package disks
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
cutil "github.com/coreos/ignition/v2/config/util"
"github.com/coreos/ignition/v2/config/v3_5_experimental/types"
"github.com/coreos/ignition/v2/internal/exec/util"
"github.com/coreos/ignition/v2/internal/sgdisk"
)
var (
ErrBadSgdiskOutput = errors.New("sgdisk had unexpected output")
)
// createPartitions creates the partitions described in config.Storage.Disks.
func (s stage) createPartitions(config types.Config) error {
if len(config.Storage.Disks) == 0 {
return nil
}
s.Logger.PushPrefix("createPartitions")
defer s.Logger.PopPrefix()
devs := []string{}
for _, disk := range config.Storage.Disks {
devs = append(devs, string(disk.Device))
}
if err := s.waitOnDevicesAndCreateAliases(devs, "disks"); err != nil {
return err
}
for _, dev := range config.Storage.Disks {
devAlias := util.DeviceAlias(string(dev.Device))
err := s.Logger.LogOp(func() error {
return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns whether the existing partition should be resized, i.e. whether the
// `resize` field is true and the partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns the partitions with their real start sector and size filled in.
// It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of a info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether the config indicates that the partition should exist.
// nil (unspecified in json) is treated the same as true.
func partitionShouldExist(part sgdisk.Partition) bool {
return !cutil.IsFalse(part.ShouldExist)
}
// getPartitionMap returns a map of partitions on device, indexed by partition number
func (s stage) getPartitionMap(device string) (util.DiskInfo, error) {
info := util.DiskInfo{}
err := s.Logger.LogOp(
func() error {
var err error
info, err = util.DumpDisk(device)
return err
}, "reading partition table of %q", device)
if err != nil {
return util.DiskInfo{}, err
}
return info, nil
}
// Allow sorting partitions (must be a stable sort) so partition number 0 happens last
// regardless of where it was in the list.
type PartitionList []types.Partition
func (p PartitionList) Len() int {
return len(p)
}
// We only care about partitions with number 0 being considered the "largest" elements
// so they are processed last.
func (p PartitionList) Less(i, j int) bool {
return p[i].Number != 0 && p[j].Number == 0
}
func (p PartitionList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// partitionDisk partitions devAlias according to the spec given by dev
func (s stage) partitionDisk(dev types.Disk, devAlias string) error {
if cutil.IsTrue(dev.WipeTable) {
op := sgdisk.Begin(s.Logger, devAlias)
s.Logger.Info("wiping partition table requested on %q", devAlias)
op.WipeTable(true)
if err := op.Commit(); err != nil {
return err
}
}
// Ensure all partitions with number 0 are last
sort.Stable(PartitionList(dev.Partitions))
op := sgdisk.Begin(s.Logger, devAlias)
diskInfo, err := s.getPartitionMap(devAlias)
if err != nil {
return err
}
// get a list of partitions that have size and start 0 replaced with the real sizes
// that would be used if all specified partitions were to be created anew.
// Also calculate sectors for all of the start/size values. | }
for _, part := range resolvedPartitions {
shouldExist := partitionShouldExist(part)
info, exists := diskInfo.GetPartition(part.Number)
var matchErr error
if exists {
matchErr = partitionMatches(info, part)
}
matches := exists && matchErr == nil
wipeEntry := cutil.IsTrue(part.WipePartitionEntry)
// This is a translation of the matrix in the operator notes.
switch {
case !exists && !shouldExist:
s.Logger.Info("partition %d specified as nonexistant and no partition was found. Success.", part.Number)
case !exists && shouldExist:
op.CreatePartition(part)
case exists && !shouldExist && !wipeEntry:
return fmt.Errorf("partition %d exists but is specified as nonexistant and wipePartitionEntry is false", part.Number)
case exists && !shouldExist && wipeEntry:
op.DeletePartition(part.Number)
case exists && shouldExist && matches:
s.Logger.Info("partition %d found with correct specifications", part.Number)
case exists && shouldExist && !wipeEntry && !matches:
if partitionMatchesResize(info, part) {
s.Logger.Info("resizing partition %d", part.Number)
op.DeletePartition(part.Number)
part.Number = info.Number
part.GUID = &info.GUID
part.TypeGUID = &info.TypeGUID
part.Label = &info.Label
part.StartSector = &info.StartSector
op.CreatePartition(part)
} else {
return fmt.Errorf("Partition %d didn't match: %v", part.Number, matchErr)
}
case exists && shouldExist && wipeEntry && !matches:
s.Logger.Info("partition %d did not meet specifications, wiping partition entry and recreating", part.Number)
op.DeletePartition(part.Number)
op.CreatePartition(part)
default:
// unfortunately, golang doesn't check that all cases are handled exhaustively
return fmt.Errorf("Unreachable code reached when processing partition %d. golang--", part.Number)
}
}
if err := op.Commit(); err != nil {
return fmt.Errorf("commit failure: %v", err)
}
return nil
} | resolvedPartitions, err := s.getRealStartAndSize(dev, devAlias, diskInfo)
if err != nil {
return err | random_line_split |
partitions.go | // Copyright 2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The storage stage is responsible for partitioning disks, creating RAID
// arrays, formatting partitions, writing files, writing systemd units, and
// writing network units.
package disks
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
cutil "github.com/coreos/ignition/v2/config/util"
"github.com/coreos/ignition/v2/config/v3_5_experimental/types"
"github.com/coreos/ignition/v2/internal/exec/util"
"github.com/coreos/ignition/v2/internal/sgdisk"
)
var (
ErrBadSgdiskOutput = errors.New("sgdisk had unexpected output")
)
// createPartitions creates the partitions described in config.Storage.Disks.
func (s stage) createPartitions(config types.Config) error {
if len(config.Storage.Disks) == 0 {
return nil
}
s.Logger.PushPrefix("createPartitions")
defer s.Logger.PopPrefix()
devs := []string{}
for _, disk := range config.Storage.Disks {
devs = append(devs, string(disk.Device))
}
if err := s.waitOnDevicesAndCreateAliases(devs, "disks"); err != nil {
return err
}
for _, dev := range config.Storage.Disks {
devAlias := util.DeviceAlias(string(dev.Device))
err := s.Logger.LogOp(func() error {
return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns whether the existing partition should be resized, i.e. whether the
// `resize` field is true and the partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
}
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
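// Illustrative note (added comment, not in the original source): with the common
// 512-byte logical sector size, convertMiBToSectors maps 1 MiB to 2048 sectors and
// 100 MiB to 204800 sectors; a nil input is passed through so "unspecified" stays unspecified.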
// getRealStartAndSize returns a map of partition numbers to a struct that contains what their real start
// and end sector should be. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
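// For reference (added comment; the example values are illustrative, not taken from a real run):
// these patterns match `sgdisk --info` lines such as
//   First sector: 2048 (at 1.0 MiB)
//   Last sector: 206847 (at 101.0 MiB)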
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil |
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether a partition should exist or not.
// nil (unspecified in json) is treated the same as true.
func partitionShouldExist(part sgdisk.Partition) bool {
return !cutil.IsFalse(part.ShouldExist)
}
// getPartitionMap returns a map of partitions on device, indexed by partition number
func (s stage) getPartitionMap(device string) (util.DiskInfo, error) {
info := util.DiskInfo{}
err := s.Logger.LogOp(
func() error {
var err error
info, err = util.DumpDisk(device)
return err
}, "reading partition table of %q", device)
if err != nil {
return util.DiskInfo{}, err
}
return info, nil
}
// Allow sorting partitions (must be a stable sort) so partition number 0 happens last
// regardless of where it was in the list.
type PartitionList []types.Partition
func (p PartitionList) Len() int {
return len(p)
}
// We only care about partitions with number 0 being considered the "largest" elements
// so they are processed last.
func (p PartitionList) Less(i, j int) bool {
return p[i].Number != 0 && p[j].Number == 0
}
func (p PartitionList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
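// Illustrative example (added comment): stable-sorting partitions numbered
// [0, 3, 0, 1] with the Less above yields [3, 1, 0, 0] -- every number-0 entry
// moves to the end while the relative order within each group is preserved.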
// partitionDisk partitions devAlias according to the spec given by dev
func (s stage) partitionDisk(dev types.Disk, devAlias string) error {
if cutil.IsTrue(dev.WipeTable) {
op := sgdisk.Begin(s.Logger, devAlias)
s.Logger.Info("wiping partition table requested on %q", devAlias)
op.WipeTable(true)
if err := op.Commit(); err != nil {
return err
}
}
// Ensure all partitions with number 0 are last
sort.Stable(PartitionList(dev.Partitions))
op := sgdisk.Begin(s.Logger, devAlias)
diskInfo, err := s.getPartitionMap(devAlias)
if err != nil {
return err
}
// get a list of partitions that have size and start 0 replaced with the real sizes
// that would be used if all specified partitions were to be created anew.
// Also calculate sectors for all of the start/size values.
resolvedPartitions, err := s.getRealStartAndSize(dev, devAlias, diskInfo)
if err != nil {
return err
}
for _, part := range resolvedPartitions {
shouldExist := partitionShouldExist(part)
info, exists := diskInfo.GetPartition(part.Number)
var matchErr error
if exists {
matchErr = partitionMatches(info, part)
}
matches := exists && matchErr == nil
wipeEntry := cutil.IsTrue(part.WipePartitionEntry)
// This is a translation of the matrix in the operator notes.
switch {
case !exists && !shouldExist:
s.Logger.Info("partition %d specified as nonexistant and no partition was found. Success.", part.Number)
case !exists && shouldExist:
op.CreatePartition(part)
case exists && !shouldExist && !wipeEntry:
return fmt.Errorf("partition %d exists but is specified as nonexistant and wipePartitionEntry is false", part.Number)
case exists && !shouldExist && wipeEntry:
op.DeletePartition(part.Number)
case exists && shouldExist && matches:
s.Logger.Info("partition %d found with correct specifications", part.Number)
case exists && shouldExist && !wipeEntry && !matches:
if partitionMatchesResize(info, part) {
s.Logger.Info("resizing partition %d", part.Number)
op.DeletePartition(part.Number)
part.Number = info.Number
part.GUID = &info.GUID
part.TypeGUID = &info.TypeGUID
part.Label = &info.Label
part.StartSector = &info.StartSector
op.CreatePartition(part)
} else {
return fmt.Errorf("Partition %d didn't match: %v", part.Number, matchErr)
}
case exists && shouldExist && wipeEntry && !matches:
s.Logger.Info("partition %d did not meet specifications, wiping partition entry and recreating", part.Number)
op.DeletePartition(part.Number)
op.CreatePartition(part)
default:
// unfortunately, golang doesn't check that all cases are handled exhaustively
return fmt.Errorf("Unreachable code reached when processing partition %d. golang--", part.Number)
}
}
if err := op.Commit(); err != nil {
return fmt.Errorf("commit failure: %v", err)
}
return nil
}
| {
return nil, err
} | conditional_block |
partitions.go | // Copyright 2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The storage stage is responsible for partitioning disks, creating RAID
// arrays, formatting partitions, writing files, writing systemd units, and
// writing network units.
package disks
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
cutil "github.com/coreos/ignition/v2/config/util"
"github.com/coreos/ignition/v2/config/v3_5_experimental/types"
"github.com/coreos/ignition/v2/internal/exec/util"
"github.com/coreos/ignition/v2/internal/sgdisk"
)
var (
ErrBadSgdiskOutput = errors.New("sgdisk had unexpected output")
)
// createPartitions creates the partitions described in config.Storage.Disks.
func (s stage) createPartitions(config types.Config) error {
if len(config.Storage.Disks) == 0 {
return nil
}
s.Logger.PushPrefix("createPartitions")
defer s.Logger.PopPrefix()
devs := []string{}
for _, disk := range config.Storage.Disks {
devs = append(devs, string(disk.Device))
}
if err := s.waitOnDevicesAndCreateAliases(devs, "disks"); err != nil {
return err
}
for _, dev := range config.Storage.Disks {
devAlias := util.DeviceAlias(string(dev.Device))
err := s.Logger.LogOp(func() error {
return s.partitionDisk(dev, devAlias)
}, "partitioning %q", devAlias)
if err != nil {
return err
}
}
return nil
}
// partitionMatches determines if the existing partition matches the spec given. See doc/operator notes for
// what it means for an existing partition to match the spec. spec must have non-zero Start and Size.
func partitionMatches(existing util.PartitionInfo, spec sgdisk.Partition) error {
if err := partitionMatchesCommon(existing, spec); err != nil {
return err
}
if spec.SizeInSectors != nil && *spec.SizeInSectors != existing.SizeInSectors {
return fmt.Errorf("size did not match (specified %d, got %d)", *spec.SizeInSectors, existing.SizeInSectors)
}
return nil
}
// partitionMatchesResize returns if the existing partition should be resized by evaluating if
// `resize` field is true and the partition matches in all respects except size.
func partitionMatchesResize(existing util.PartitionInfo, spec sgdisk.Partition) bool {
return cutil.IsTrue(spec.Resize) && partitionMatchesCommon(existing, spec) == nil
}
// partitionMatchesCommon handles the common tests (excluding the partition size) to determine
// if the existing partition matches the spec given.
func partitionMatchesCommon(existing util.PartitionInfo, spec sgdisk.Partition) error {
if spec.Number != existing.Number {
return fmt.Errorf("partition numbers did not match (specified %d, got %d). This should not happen, please file a bug.", spec.Number, existing.Number)
}
if spec.StartSector != nil && *spec.StartSector != existing.StartSector {
return fmt.Errorf("starting sector did not match (specified %d, got %d)", *spec.StartSector, existing.StartSector)
}
if cutil.NotEmpty(spec.GUID) && !strings.EqualFold(*spec.GUID, existing.GUID) {
return fmt.Errorf("GUID did not match (specified %q, got %q)", *spec.GUID, existing.GUID)
}
if cutil.NotEmpty(spec.TypeGUID) && !strings.EqualFold(*spec.TypeGUID, existing.TypeGUID) {
return fmt.Errorf("type GUID did not match (specified %q, got %q)", *spec.TypeGUID, existing.TypeGUID)
}
if spec.Label != nil && *spec.Label != existing.Label {
return fmt.Errorf("label did not match (specified %q, got %q)", *spec.Label, existing.Label)
}
return nil
}
// partitionShouldBeInspected returns if the partition has zeroes that need to be resolved to sectors.
func partitionShouldBeInspected(part sgdisk.Partition) bool |
func convertMiBToSectors(mib *int, sectorSize int) *int64 {
if mib != nil {
v := int64(*mib) * (1024 * 1024 / int64(sectorSize))
return &v
} else {
return nil
}
}
// getRealStartAndSize returns a map of partition numbers to a struct that contains what their real start
// and end sector should be. It runs sgdisk --pretend to determine what the partitions would look like if
// everything specified were to be (re)created.
func (s stage) getRealStartAndSize(dev types.Disk, devAlias string, diskInfo util.DiskInfo) ([]sgdisk.Partition, error) {
partitions := []sgdisk.Partition{}
for _, cpart := range dev.Partitions {
partitions = append(partitions, sgdisk.Partition{
Partition: cpart,
StartSector: convertMiBToSectors(cpart.StartMiB, diskInfo.LogicalSectorSize),
SizeInSectors: convertMiBToSectors(cpart.SizeMiB, diskInfo.LogicalSectorSize),
})
}
op := sgdisk.Begin(s.Logger, devAlias)
for _, part := range partitions {
if info, exists := diskInfo.GetPartition(part.Number); exists {
// delete all existing partitions
op.DeletePartition(part.Number)
if part.StartSector == nil && !cutil.IsTrue(part.WipePartitionEntry) {
// don't care means keep the same if we can't wipe, otherwise stick it at start 0
part.StartSector = &info.StartSector
}
if part.SizeInSectors == nil && !cutil.IsTrue(part.WipePartitionEntry) {
part.SizeInSectors = &info.SizeInSectors
}
}
if partitionShouldExist(part) {
// Clear the label. sgdisk doesn't escape control characters. This makes parsing easier
part.Label = nil
op.CreatePartition(part)
}
}
// We only care to examine partitions that have start or size 0.
partitionsToInspect := []int{}
for _, part := range partitions {
if partitionShouldBeInspected(part) {
op.Info(part.Number)
partitionsToInspect = append(partitionsToInspect, part.Number)
}
}
output, err := op.Pretend()
if err != nil {
return nil, err
}
realDimensions, err := parseSgdiskPretend(output, partitionsToInspect)
if err != nil {
return nil, err
}
result := []sgdisk.Partition{}
for _, part := range partitions {
if dims, ok := realDimensions[part.Number]; ok {
if part.StartSector != nil {
part.StartSector = &dims.start
}
if part.SizeInSectors != nil {
part.SizeInSectors = &dims.size
}
}
result = append(result, part)
}
return result, nil
}
type sgdiskOutput struct {
start int64
size int64
}
// parseLine takes a regexp that captures an int64 and a string to match on. On success it returns
// the captured int64 and nil. If the regexp does not match it returns -1 and nil. If it encountered
// an error it returns 0 and the error.
func parseLine(r *regexp.Regexp, line string) (int64, error) {
matches := r.FindStringSubmatch(line)
switch len(matches) {
case 0:
return -1, nil
case 2:
return strconv.ParseInt(matches[1], 10, 64)
default:
return 0, ErrBadSgdiskOutput
}
}
// parseSgdiskPretend parses the output of running sgdisk pretend with --info specified for each partition
// number specified in partitionNumbers. E.g. if partitionNumbers is [1,4,5], it is expected that the sgdisk
// output was from running `sgdisk --pretend <commands> --info=1 --info=4 --info=5`. It assumes that the
// partition labels are well behaved (i.e. contain no control characters). It returns a list of partitions
// matching the partition numbers specified, but with the start and size information as determined by sgdisk.
// The partition numbers need to be passed in because sgdisk includes them in its output.
func parseSgdiskPretend(sgdiskOut string, partitionNumbers []int) (map[int]sgdiskOutput, error) {
if len(partitionNumbers) == 0 {
return nil, nil
}
startRegex := regexp.MustCompile(`^First sector: (\d*) \(.*\)$`)
endRegex := regexp.MustCompile(`^Last sector: (\d*) \(.*\)$`)
const (
START = iota
END = iota
FAIL_ON_START_END = iota
)
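// State machine sketch (added comment): each inspected partition should produce a
// "First sector" line (START) followed by a "Last sector" line (END); once every
// requested partition has been seen, any further match of either pattern is treated
// as bad sgdisk output (FAIL_ON_START_END).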
output := map[int]sgdiskOutput{}
state := START
current := sgdiskOutput{}
i := 0
lines := strings.Split(sgdiskOut, "\n")
for _, line := range lines {
switch state {
case START:
start, err := parseLine(startRegex, line)
if err != nil {
return nil, err
}
if start != -1 {
current.start = start
state = END
}
case END:
end, err := parseLine(endRegex, line)
if err != nil {
return nil, err
}
if end != -1 {
current.size = 1 + end - current.start
output[partitionNumbers[i]] = current
i++
if i == len(partitionNumbers) {
state = FAIL_ON_START_END
} else {
current = sgdiskOutput{}
state = START
}
}
case FAIL_ON_START_END:
if len(startRegex.FindStringSubmatch(line)) != 0 ||
len(endRegex.FindStringSubmatch(line)) != 0 {
return nil, ErrBadSgdiskOutput
}
}
}
if state != FAIL_ON_START_END {
// We stopped parsing in the middle of an info block. Something is wrong
return nil, ErrBadSgdiskOutput
}
return output, nil
}
// partitionShouldExist returns whether a partition should exist or not.
// nil (unspecified in json) is treated the same as true.
func partitionShouldExist(part sgdisk.Partition) bool {
return !cutil.IsFalse(part.ShouldExist)
}
// getPartitionMap returns a map of partitions on device, indexed by partition number
func (s stage) getPartitionMap(device string) (util.DiskInfo, error) {
info := util.DiskInfo{}
err := s.Logger.LogOp(
func() error {
var err error
info, err = util.DumpDisk(device)
return err
}, "reading partition table of %q", device)
if err != nil {
return util.DiskInfo{}, err
}
return info, nil
}
// Allow sorting partitions (must be a stable sort) so partition number 0 happens last
// regardless of where it was in the list.
type PartitionList []types.Partition
func (p PartitionList) Len() int {
return len(p)
}
// We only care about partitions with number 0 being considered the "largest" elements
// so they are processed last.
func (p PartitionList) Less(i, j int) bool {
return p[i].Number != 0 && p[j].Number == 0
}
func (p PartitionList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// partitionDisk partitions devAlias according to the spec given by dev
func (s stage) partitionDisk(dev types.Disk, devAlias string) error {
if cutil.IsTrue(dev.WipeTable) {
op := sgdisk.Begin(s.Logger, devAlias)
s.Logger.Info("wiping partition table requested on %q", devAlias)
op.WipeTable(true)
if err := op.Commit(); err != nil {
return err
}
}
// Ensure all partitions with number 0 are last
sort.Stable(PartitionList(dev.Partitions))
op := sgdisk.Begin(s.Logger, devAlias)
diskInfo, err := s.getPartitionMap(devAlias)
if err != nil {
return err
}
// get a list of partitions that have size and start 0 replaced with the real sizes
// that would be used if all specified partitions were to be created anew.
// Also calculate sectors for all of the start/size values.
resolvedPartitions, err := s.getRealStartAndSize(dev, devAlias, diskInfo)
if err != nil {
return err
}
for _, part := range resolvedPartitions {
shouldExist := partitionShouldExist(part)
info, exists := diskInfo.GetPartition(part.Number)
var matchErr error
if exists {
matchErr = partitionMatches(info, part)
}
matches := exists && matchErr == nil
wipeEntry := cutil.IsTrue(part.WipePartitionEntry)
// This is a translation of the matrix in the operator notes.
switch {
case !exists && !shouldExist:
s.Logger.Info("partition %d specified as nonexistant and no partition was found. Success.", part.Number)
case !exists && shouldExist:
op.CreatePartition(part)
case exists && !shouldExist && !wipeEntry:
return fmt.Errorf("partition %d exists but is specified as nonexistant and wipePartitionEntry is false", part.Number)
case exists && !shouldExist && wipeEntry:
op.DeletePartition(part.Number)
case exists && shouldExist && matches:
s.Logger.Info("partition %d found with correct specifications", part.Number)
case exists && shouldExist && !wipeEntry && !matches:
if partitionMatchesResize(info, part) {
s.Logger.Info("resizing partition %d", part.Number)
op.DeletePartition(part.Number)
part.Number = info.Number
part.GUID = &info.GUID
part.TypeGUID = &info.TypeGUID
part.Label = &info.Label
part.StartSector = &info.StartSector
op.CreatePartition(part)
} else {
return fmt.Errorf("Partition %d didn't match: %v", part.Number, matchErr)
}
case exists && shouldExist && wipeEntry && !matches:
s.Logger.Info("partition %d did not meet specifications, wiping partition entry and recreating", part.Number)
op.DeletePartition(part.Number)
op.CreatePartition(part)
default:
// unfortunately, golang doesn't check that all cases are handled exhaustively
return fmt.Errorf("Unreachable code reached when processing partition %d. golang--", part.Number)
}
}
if err := op.Commit(); err != nil {
return fmt.Errorf("commit failure: %v", err)
}
return nil
}
| {
if part.Number == 0 {
return false
}
return (part.StartSector != nil && *part.StartSector == 0) ||
(part.SizeInSectors != nil && *part.SizeInSectors == 0)
} | identifier_body |
file.rs | use crate::reader::{LittleEndian, ReadBytesExt, Reader};
use std::fmt;
use std::io::Read;
use thiserror::Error;
const ELF_MAGIC: [u8; 4] = [0x7f, b'E', b'L', b'F'];
fn show_machine(value: u16) -> &'static str {
match value {
0 => "No machine",
1 => "AT&T WE 32100",
2 => "SUN SPARC",
3 => "Intel 80386",
4 => "Motorola m68k family",
5 => "Motorola m88k family",
6 => "Intel MCU",
7 => "Intel 80860",
8 => "MIPS R3000 big-endian",
9 => "IBM System/370",
10 => "MIPS R3000 little-endian",
15 => "HPPA",
16 => "reserved 16",
17 => "Fujitsu VPP500",
18 => "Sun's v8plus",
19 => "Intel 80960",
20 => "PowerPC",
21 => "PowerPC 64-bit",
22 => "IBM S390",
23 => "IBM SPU/SPC",
36 => "NEC V800 series",
37 => "Fujitsu FR20",
38 => "TRW RH-32",
39 => "Motorola RCE",
40 => "ARM",
41 => "Digital Alpha",
42 => "Hitachi SH",
43 => "SPARC v9 64-bit",
44 => "Siemens Tricore",
45 => "Argonaut RISC Core",
46 => "Hitachi H8/300",
47 => "Hitachi H8/300H",
48 => "Hitachi H8S",
49 => "Hitachi H8/500",
50 => "Intel Merced",
51 => "Stanford MIPS-X",
52 => "Motorola Coldfire",
53 => "Motorola M68HC12",
54 => "Fujitsu MMA Multimedia Accelerator",
55 => "Siemens PCP",
56 => "Sony nCPU embeeded RISC",
57 => "Denso NDR1 microprocessor",
58 => "Motorola Start*Core processor",
59 => "Toyota ME16 processor",
60 => "STMicroelectronic ST100 processor",
61 => "Advanced Logic Corp. Tinyj emb.fam",
62 => "AMD x86-64 architecture",
63 => "Sony DSP Processor",
64 => "Digital PDP-10",
65 => "Digital PDP-11",
66 => "Siemens FX66 microcontroller",
67 => "STMicroelectronics ST9+ 8/16 mc",
68 => "STmicroelectronics ST7 8 bit mc",
69 => "Motorola MC68HC16 microcontroller",
70 => "Motorola MC68HC11 microcontroller",
71 => "Motorola MC68HC08 microcontroller",
72 => "Motorola MC68HC05 microcontroller",
73 => "Silicon Graphics SVx",
74 => "STMicroelectronics ST19 8 bit mc",
75 => "Digital VAX",
76 => "Axis Communications 32-bit emb.proc",
77 => "Infineon Technologies 32-bit emb.proc",
78 => "Element 14 64-bit DSP Processor",
79 => "LSI Logic 16-bit DSP Processor",
80 => "Donald Knuth's educational 64-bit proc",
81 => "Harvard University machine-independent object files",
82 => "SiTera Prism",
83 => "Atmel AVR 8-bit microcontroller",
84 => "Fujitsu FR30",
85 => "Mitsubishi D10V",
86 => "Mitsubishi D30V",
87 => "NEC v850",
88 => "Mitsubishi M32R",
89 => "Matsushita MN10300",
90 => "Matsushita MN10200",
91 => "picoJava",
92 => "OpenRISC 32-bit embedded processor",
93 => "ARC International ARCompact",
94 => "Tensilica Xtensa Architecture",
95 => "Alphamosaic VideoCore",
96 => "Thompson Multimedia General Purpose Proc",
97 => "National Semi. 32000",
98 => "Tenor Network TPC",
99 => "Trebia SNP 1000",
100 => "STMicroelectronics ST200",
101 => "Ubicom IP2xxx",
102 => "MAX processor",
103 => "National Semi. CompactRISC",
104 => "Fujitsu F2MC16",
105 => "Texas Instruments msp430",
106 => "Analog Devices Blackfin DSP",
107 => "Seiko Epson S1C33 family",
108 => "Sharp embedded microprocessor",
109 => "Arca RISC",
110 => "PKU-Unity & MPRC Peking Uni. mc series",
111 => "eXcess configurable cpu",
112 => "Icera Semi. Deep Execution Processor",
113 => "Altera Nios II",
114 => "National Semi. CompactRISC CRX",
115 => "Motorola XGATE",
116 => "Infineon C16x/XC16x",
117 => "Renesas M16C",
118 => "Microchip Technology dsPIC30F",
119 => "Freescale Communication Engine RISC",
120 => "Renesas M32C",
131 => "Altium TSK3000",
132 => "Freescale RS08",
133 => "Analog Devices SHARC family",
134 => "Cyan Technology eCOG2",
135 => "Sunplus S+core7 RISC",
136 => "New Japan Radio (NJR) 24-bit DSP",
137 => "Broadcom VideoCore III",
138 => "RISC for Lattice FPGA",
139 => "Seiko Epson C17",
140 => "Texas Instruments TMS320C6000 DSP",
141 => "Texas Instruments TMS320C2000 DSP",
142 => "Texas Instruments TMS320C55x DSP",
143 => "Texas Instruments App. Specific RISC",
144 => "Texas Instruments Prog. Realtime Unit",
160 => "STMicroelectronics 64bit VLIW DSP",
161 => "Cypress M8C",
162 => "Renesas R32C",
163 => "NXP Semi. TriMedia",
164 => "QUALCOMM DSP6",
165 => "Intel 8051 and variants",
166 => "STMicroelectronics STxP7x",
167 => "Andes Tech. compact code emb. RISC",
168 => "Cyan Technology eCOG1X",
169 => "Dallas Semi. MAXQ30 mc",
170 => "New Japan Radio (NJR) 16-bit DSP",
171 => "M2000 Reconfigurable RISC",
172 => "Cray NV2 vector architecture",
173 => "Renesas RX",
174 => "Imagination Tech. META",
175 => "MCST Elbrus",
176 => "Cyan Technology eCOG16",
177 => "National Semi. CompactRISC CR16",
178 => "Freescale Extended Time Processing Unit",
179 => "Infineon Tech. SLE9X",
180 => "Intel L10M",
181 => "Intel K10M",
182 => "reserved 182",
183 => "ARM AARCH64",
184 => "reserved 184",
185 => "Amtel 32-bit microprocessor",
186 => "STMicroelectronics STM8",
187 => "Tileta TILE64",
188 => "Tilera TILEPro",
189 => "Xilinx MicroBlaze",
190 => "NVIDIA CUDA",
191 => "Tilera TILE-Gx",
192 => "CloudShield",
193 => "KIPO-KAIST Core-A 1st gen.",
194 => "KIPO-KAIST Core-A 2nd gen.",
195 => "Synopsys ARCompact V2",
196 => "Open8 RISC",
197 => "Renesas RL78",
198 => "Broadcom VideoCore V",
199 => "Renesas 78KOR",
200 => "Freescale 56800EX DSC",
201 => "Beyond BA1",
202 => "Beyond BA2",
203 => "XMOS xCORE",
204 => "Microchip 8-bit PIC(r)",
210 => "KM211 KM32",
211 => "KM211 KMX32",
212 => "KM211 KMX16",
213 => "KM211 KMX8",
214 => "KM211 KVARC",
215 => "Paneve CDP",
216 => "Cognitive Smart Memory Processor",
217 => "Bluechip CoolEngine",
218 => "Nanoradio Optimized RISC",
219 => "CSR Kalimba",
220 => "Zilog Z80",
221 => "Controls and Data Services VISIUMcore",
222 => "FTDI Chip FT32",
223 => "Moxie processor",
224 => "AMD GPU",
243 => "RISC-V",
247 => "Linux BPF -- in-kernel virtual machine",
_ => "Unknown",
}
} | pub enum FileClass {
// Invalid class
None,
// 32-bit objects
ElfClass32,
// 64 bit objects
ElfClass64,
// Unknown class
Invalid(u8),
}
#[derive(Debug)]
pub enum Encoding {
// Invalid data encoding
None,
// 2's complement, little endian
LittleEndian,
// 2's complement big endian
BigEndian,
// Unknown data encoding
Invalid(u8),
}
#[derive(Debug)]
pub enum OsAbi {
// UNIX System V ABI
UnixVSystem,
// HP-UX
HpUx,
// NetBSD
NetBsd,
// Object uses GNU ELF extensions
GnuElfExtensions,
// SUN Solaris
SunSolaris,
// IBM AIX
IbmAix,
// SGI Irix
SgiIrix,
// FreeBSD
FreeBsd,
// Compaq TRU64 UNIX
CompaqTru64Unix,
// Novell Modesto
NovellModesto,
// OpenBSD
OpenBsd,
// ARM EABI
ArmEabi,
// ARM
Arm,
// Standalone (embedded) application
Standalone,
// Unknown
Invalid(u8),
}
#[derive(Debug)]
pub enum ObjectType {
// No file type
NoFileType,
// Relocatable file
RelocatableFile,
// Executable file
ExecutableFile,
// Shared object file
SharedObjectFile,
// Core file
CoreFile,
// Unknown
Invalid(u16),
}
#[derive(Debug)]
pub enum Version {
// Invalid ELF version
Unspecified,
// Current version
Current,
// Unknown
Invalid(u32),
}
#[derive(Debug)]
pub struct ElfFileHeader {
// Conglomeration of the identification bytes, must be \177ELF
pub e_magic: [u8; 4],
// File class
pub e_class: FileClass,
// Data encoding
pub e_encoding: Encoding,
// File version, value must be EV_CURRENT
pub e_version_: u8,
// OS ABI identification
pub e_os_abi: OsAbi,
// ABI version
pub e_os_abi_version: u8,
// Padding bytes
pub e_padding_: [u8; 7],
// Object file type
pub e_type: ObjectType,
// Architecture
pub e_machine: u16,
// Object file version
pub e_version: Version,
// Entry point virtual address
pub e_entry: u64,
// Program header table file offset
pub e_phoff: u64,
// Section header table file offset
pub e_shoff: u64,
// Processor-specific flags
pub e_flags: u32,
// ELF header size in bytes
pub e_ehsize: u16,
// Program header table entry size
pub e_phentsize: u16,
// Program header table entry count
pub e_phnum: u16,
// Section header table entry size
pub e_shentsize: u16,
// Section header table entry count
pub e_shnum: u16,
// Section header string table index
pub e_shstrndx: u16,
}
#[derive(Error, Debug)]
pub enum Error {
#[error("Elf magic mismatch: got: {:02X?}, expected: {:02X?}", magic, ELF_MAGIC)]
ElfMagicMismatchError {
magic: [u8; 4]
},
#[error(transparent)]
IOError(#[from] std::io::Error),
}
impl ElfFileHeader {
pub fn new(reader: &mut Reader) -> Result<ElfFileHeader, Error> {
let mut e_magic: [u8; 4] = [0; 4];
reader.read_exact(&mut e_magic)?;
if e_magic[0] != ELF_MAGIC[0]
|| e_magic[1] != ELF_MAGIC[1]
|| e_magic[2] != ELF_MAGIC[2]
|| e_magic[3] != ELF_MAGIC[3]
{
return Err(Error::ElfMagicMismatchError { magic: e_magic });
}
let e_class = FileClass::new(reader.read_u8()?);
let e_encoding = Encoding::new(reader.read_u8()?);
let e_version_ = reader.read_u8()?;
let e_os_abi = OsAbi::new(reader.read_u8()?);
let e_os_abi_version = reader.read_u8()?;
let mut e_padding_: [u8; 7] = [0; 7];
reader.read_exact(&mut e_padding_)?;
let e_type = ObjectType::new(reader.read_u16::<LittleEndian>()?);
let e_machine = reader.read_u16::<LittleEndian>()?;
let e_version = Version::new(reader.read_u32::<LittleEndian>()?);
let e_entry = reader.read_u64::<LittleEndian>()?;
let e_phoff = reader.read_u64::<LittleEndian>()?;
let e_shoff = reader.read_u64::<LittleEndian>()?;
let e_flags = reader.read_u32::<LittleEndian>()?;
let e_ehsize = reader.read_u16::<LittleEndian>()?;
let e_phentsize = reader.read_u16::<LittleEndian>()?;
let e_phnum = reader.read_u16::<LittleEndian>()?;
let e_shentsize = reader.read_u16::<LittleEndian>()?;
let e_shnum = reader.read_u16::<LittleEndian>()?;
let e_shstrndx = reader.read_u16::<LittleEndian>()?;
Ok(ElfFileHeader {
e_magic,
e_class,
e_encoding,
e_version_,
e_os_abi,
e_os_abi_version,
e_padding_,
e_type,
e_machine,
e_version,
e_entry,
e_phoff,
e_shoff,
e_flags,
e_ehsize,
e_phentsize,
e_phnum,
e_shentsize,
e_shnum,
e_shstrndx,
})
}
}
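// Hypothetical usage sketch (added comment; `Reader::new` and the input source are
// assumptions, not shown in this file):
//
//     let mut reader = Reader::new(elf_bytes);
//     let header = ElfFileHeader::new(&mut reader)?;
//     println!("{}", header);
//
// `new` returns Error::ElfMagicMismatchError when the first four bytes are not
// 0x7f 'E' 'L' 'F', and Error::IOError if the reader cannot supply enough bytes.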
impl FileClass {
fn new(value: u8) -> FileClass {
match value {
0 => FileClass::None,
1 => FileClass::ElfClass32,
2 => FileClass::ElfClass64,
_ => FileClass::Invalid(value),
}
}
}
impl Encoding {
fn new(value: u8) -> Encoding {
match value {
0 => Encoding::None,
1 => Encoding::LittleEndian,
2 => Encoding::BigEndian,
_ => Encoding::Invalid(value),
}
}
}
impl OsAbi {
fn new(value: u8) -> OsAbi {
use OsAbi::*;
match value {
0 => UnixVSystem,
1 => HpUx,
2 => NetBsd,
3 => GnuElfExtensions,
6 => SunSolaris,
7 => IbmAix,
8 => SgiIrix,
9 => FreeBsd,
10 => CompaqTru64Unix,
11 => NovellModesto,
12 => OpenBsd,
64 => ArmEabi,
97 => Arm,
255 => Standalone,
_ => OsAbi::Invalid(value),
}
}
}
impl ObjectType {
fn new(value: u16) -> ObjectType {
use ObjectType::*;
match value {
0 => NoFileType,
1 => RelocatableFile,
2 => ExecutableFile,
3 => SharedObjectFile,
4 => CoreFile,
_ => Invalid(value),
}
}
}
impl Version {
fn new(value: u32) -> Version {
match value {
0 => Version::Unspecified,
1 => Version::Current,
_ => Version::Invalid(value),
}
}
}
impl fmt::Display for ElfFileHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Elf Header:")?;
writeln!(f, "{:<32}{:x?}", "Magic:", self.e_magic)?;
writeln!(f, "{:<32}{:?}", "Class:", self.e_class)?;
writeln!(f, "{:<32}{:?}", "Encoding:", self.e_encoding)?;
writeln!(f, "{:<32}{:?}", "OS/ABI:", self.e_os_abi)?;
writeln!(f, "{:<32}{}", "ABI Version:", self.e_os_abi_version)?;
writeln!(f, "{:<32}{:x?}", "Padding:", self.e_padding_)?;
writeln!(f, "{:<32}{:?}", "Type:", self.e_type)?;
writeln!(f, "{:<32}{}", "Architecture:", show_machine(self.e_machine))?;
writeln!(f, "{:<32}{:?}", "Version:", self.e_version)?;
writeln!(f, "{:<32}{:#x}", "Entry point address:", self.e_entry)?;
writeln!(f, "{:<32}{}", "Program header offset:", self.e_phoff)?;
writeln!(f, "{:<32}{}", "Section header offset:", self.e_shoff)?;
writeln!(f, "{:<32}{}", "Flags:", self.e_flags)?;
writeln!(f, "{:<32}{}", "Size of this header:", self.e_ehsize)?;
writeln!(f, "{:<32}{}", "Size of program headers:", self.e_phentsize)?;
writeln!(f, "{:<32}{}", "Number of program headers:", self.e_phnum)?;
writeln!(f, "{:<32}{}", "Size of section headers:", self.e_shentsize)?;
writeln!(f, "{:<32}{}", "Number of section headers:", self.e_shnum)?;
writeln!(
f,
"{:<32}{}",
"Section header strtab index:", self.e_shstrndx
)
}
} | #[derive(Debug)] | random_line_split |
file.rs | use crate::reader::{LittleEndian, ReadBytesExt, Reader};
use std::fmt;
use std::io::Read;
use thiserror::Error;
const ELF_MAGIC: [u8; 4] = [0x7f, b'E', b'L', b'F'];
fn show_machine(value: u16) -> &'static str {
match value {
0 => "No machine",
1 => "AT&T WE 32100",
2 => "SUN SPARC",
3 => "Intel 80386",
4 => "Motorola m68k family",
5 => "Motorola m88k family",
6 => "Intel MCU",
7 => "Intel 80860",
8 => "MIPS R3000 big-endian",
9 => "IBM System/370",
10 => "MIPS R3000 little-endian",
15 => "HPPA",
16 => "reserved 16",
17 => "Fujitsu VPP500",
18 => "Sun's v8plus",
19 => "Intel 80960",
20 => "PowerPC",
21 => "PowerPC 64-bit",
22 => "IBM S390",
23 => "IBM SPU/SPC",
36 => "NEC V800 series",
37 => "Fujitsu FR20",
38 => "TRW RH-32",
39 => "Motorola RCE",
40 => "ARM",
41 => "Digital Alpha",
42 => "Hitachi SH",
43 => "SPARC v9 64-bit",
44 => "Siemens Tricore",
45 => "Argonaut RISC Core",
46 => "Hitachi H8/300",
47 => "Hitachi H8/300H",
48 => "Hitachi H8S",
49 => "Hitachi H8/500",
50 => "Intel Merced",
51 => "Stanford MIPS-X",
52 => "Motorola Coldfire",
53 => "Motorola M68HC12",
54 => "Fujitsu MMA Multimedia Accelerator",
55 => "Siemens PCP",
56 => "Sony nCPU embeeded RISC",
57 => "Denso NDR1 microprocessor",
58 => "Motorola Start*Core processor",
59 => "Toyota ME16 processor",
60 => "STMicroelectronic ST100 processor",
61 => "Advanced Logic Corp. Tinyj emb.fam",
62 => "AMD x86-64 architecture",
63 => "Sony DSP Processor",
64 => "Digital PDP-10",
65 => "Digital PDP-11",
66 => "Siemens FX66 microcontroller",
67 => "STMicroelectronics ST9+ 8/16 mc",
68 => "STmicroelectronics ST7 8 bit mc",
69 => "Motorola MC68HC16 microcontroller",
70 => "Motorola MC68HC11 microcontroller",
71 => "Motorola MC68HC08 microcontroller",
72 => "Motorola MC68HC05 microcontroller",
73 => "Silicon Graphics SVx",
74 => "STMicroelectronics ST19 8 bit mc",
75 => "Digital VAX",
76 => "Axis Communications 32-bit emb.proc",
77 => "Infineon Technologies 32-bit emb.proc",
78 => "Element 14 64-bit DSP Processor",
79 => "LSI Logic 16-bit DSP Processor",
80 => "Donald Knuth's educational 64-bit proc",
81 => "Harvard University machine-independent object files",
82 => "SiTera Prism",
83 => "Atmel AVR 8-bit microcontroller",
84 => "Fujitsu FR30",
85 => "Mitsubishi D10V",
86 => "Mitsubishi D30V",
87 => "NEC v850",
88 => "Mitsubishi M32R",
89 => "Matsushita MN10300",
90 => "Matsushita MN10200",
91 => "picoJava",
92 => "OpenRISC 32-bit embedded processor",
93 => "ARC International ARCompact",
94 => "Tensilica Xtensa Architecture",
95 => "Alphamosaic VideoCore",
96 => "Thompson Multimedia General Purpose Proc",
97 => "National Semi. 32000",
98 => "Tenor Network TPC",
99 => "Trebia SNP 1000",
100 => "STMicroelectronics ST200",
101 => "Ubicom IP2xxx",
102 => "MAX processor",
103 => "National Semi. CompactRISC",
104 => "Fujitsu F2MC16",
105 => "Texas Instruments msp430",
106 => "Analog Devices Blackfin DSP",
107 => "Seiko Epson S1C33 family",
108 => "Sharp embedded microprocessor",
109 => "Arca RISC",
110 => "PKU-Unity & MPRC Peking Uni. mc series",
111 => "eXcess configurable cpu",
112 => "Icera Semi. Deep Execution Processor",
113 => "Altera Nios II",
114 => "National Semi. CompactRISC CRX",
115 => "Motorola XGATE",
116 => "Infineon C16x/XC16x",
117 => "Renesas M16C",
118 => "Microchip Technology dsPIC30F",
119 => "Freescale Communication Engine RISC",
120 => "Renesas M32C",
131 => "Altium TSK3000",
132 => "Freescale RS08",
133 => "Analog Devices SHARC family",
134 => "Cyan Technology eCOG2",
135 => "Sunplus S+core7 RISC",
136 => "New Japan Radio (NJR) 24-bit DSP",
137 => "Broadcom VideoCore III",
138 => "RISC for Lattice FPGA",
139 => "Seiko Epson C17",
140 => "Texas Instruments TMS320C6000 DSP",
141 => "Texas Instruments TMS320C2000 DSP",
142 => "Texas Instruments TMS320C55x DSP",
143 => "Texas Instruments App. Specific RISC",
144 => "Texas Instruments Prog. Realtime Unit",
160 => "STMicroelectronics 64bit VLIW DSP",
161 => "Cypress M8C",
162 => "Renesas R32C",
163 => "NXP Semi. TriMedia",
164 => "QUALCOMM DSP6",
165 => "Intel 8051 and variants",
166 => "STMicroelectronics STxP7x",
167 => "Andes Tech. compact code emb. RISC",
168 => "Cyan Technology eCOG1X",
169 => "Dallas Semi. MAXQ30 mc",
170 => "New Japan Radio (NJR) 16-bit DSP",
171 => "M2000 Reconfigurable RISC",
172 => "Cray NV2 vector architecture",
173 => "Renesas RX",
174 => "Imagination Tech. META",
175 => "MCST Elbrus",
176 => "Cyan Technology eCOG16",
177 => "National Semi. CompactRISC CR16",
178 => "Freescale Extended Time Processing Unit",
179 => "Infineon Tech. SLE9X",
180 => "Intel L10M",
181 => "Intel K10M",
182 => "reserved 182",
183 => "ARM AARCH64",
184 => "reserved 184",
185 => "Amtel 32-bit microprocessor",
186 => "STMicroelectronics STM8",
187 => "Tileta TILE64",
188 => "Tilera TILEPro",
189 => "Xilinx MicroBlaze",
190 => "NVIDIA CUDA",
191 => "Tilera TILE-Gx",
192 => "CloudShield",
193 => "KIPO-KAIST Core-A 1st gen.",
194 => "KIPO-KAIST Core-A 2nd gen.",
195 => "Synopsys ARCompact V2",
196 => "Open8 RISC",
197 => "Renesas RL78",
198 => "Broadcom VideoCore V",
199 => "Renesas 78KOR",
200 => "Freescale 56800EX DSC",
201 => "Beyond BA1",
202 => "Beyond BA2",
203 => "XMOS xCORE",
204 => "Microchip 8-bit PIC(r)",
210 => "KM211 KM32",
211 => "KM211 KMX32",
212 => "KM211 KMX16",
213 => "KM211 KMX8",
214 => "KM211 KVARC",
215 => "Paneve CDP",
216 => "Cognitive Smart Memory Processor",
217 => "Bluechip CoolEngine",
218 => "Nanoradio Optimized RISC",
219 => "CSR Kalimba",
220 => "Zilog Z80",
221 => "Controls and Data Services VISIUMcore",
222 => "FTDI Chip FT32",
223 => "Moxie processor",
224 => "AMD GPU",
243 => "RISC-V",
247 => "Linux BPF -- in-kernel virtual machine",
_ => "Unknown",
}
}
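// Example (added comment): show_machine(62) yields "AMD x86-64 architecture" and
// show_machine(40) yields "ARM"; any value missing from the table falls back to "Unknown".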
#[derive(Debug)]
pub enum FileClass {
// Invalid class
None,
// 32-bit objects
ElfClass32,
// 64 bit objects
ElfClass64,
// Unknown class
Invalid(u8),
}
#[derive(Debug)]
pub enum | {
// Invalid data encoding
None,
// 2's complement, little endian
LittleEndian,
// 2's complement big endian
BigEndian,
// Unknown data encoding
Invalid(u8),
}
#[derive(Debug)]
pub enum OsAbi {
// UNIX System V ABI
UnixVSystem,
// HP-UX
HpUx,
// NetBSD
NetBsd,
// Object uses GNU ELF extensions
GnuElfExtensions,
// SUN Solaris
SunSolaris,
// IBM AIX
IbmAix,
// SGI Irix
SgiIrix,
// FreeBSD
FreeBsd,
// Compaq TRU64 UNIX
CompaqTru64Unix,
// Novell Modesto
NovellModesto,
// OpenBSD
OpenBsd,
// ARM EABI
ArmEabi,
// ARM
Arm,
// Standalone (embedded) application
Standalone,
// Unknown
Invalid(u8),
}
#[derive(Debug)]
pub enum ObjectType {
// No file type
NoFileType,
// Relocatable file
RelocatableFile,
// Executable file
ExecutableFile,
// Shared object file
SharedObjectFile,
// Core file
CoreFile,
// Unknown
Invalid(u16),
}
#[derive(Debug)]
pub enum Version {
// Invalid ELF version
Unspecified,
// Current version
Current,
// Unknown
Invalid(u32),
}
#[derive(Debug)]
pub struct ElfFileHeader {
// Conglomeration of the identification bytes, must be \177ELF
pub e_magic: [u8; 4],
// File class
pub e_class: FileClass,
// Data encoding
pub e_encoding: Encoding,
// File version, value must be EV_CURRENT
pub e_version_: u8,
// OS ABI identification
pub e_os_abi: OsAbi,
// ABI version
pub e_os_abi_version: u8,
// Padding bytes
pub e_padding_: [u8; 7],
// Object file type
pub e_type: ObjectType,
// Architecture
pub e_machine: u16,
// Object file version
pub e_version: Version,
// Entry point virtual address
pub e_entry: u64,
// Program header table file offset
pub e_phoff: u64,
// Section header table file offset
pub e_shoff: u64,
// Processor-specific flags
pub e_flags: u32,
// ELF header size in bytes
pub e_ehsize: u16,
// Program header table entry size
pub e_phentsize: u16,
// Program header table entry count
pub e_phnum: u16,
// Section header table entry size
pub e_shentsize: u16,
// Section header table entry count
pub e_shnum: u16,
// Section header string table index
pub e_shstrndx: u16,
}
#[derive(Error, Debug)]
pub enum Error {
#[error("Elf magic mismatch: got: {:02X?}, expected: {:02X?}", magic, ELF_MAGIC)]
ElfMagicMismatchError {
magic: [u8; 4]
},
#[error(transparent)]
IOError(#[from] std::io::Error),
}
impl ElfFileHeader {
pub fn new(reader: &mut Reader) -> Result<ElfFileHeader, Error> {
let mut e_magic: [u8; 4] = [0; 4];
reader.read_exact(&mut e_magic)?;
if e_magic[0] != ELF_MAGIC[0]
|| e_magic[1] != ELF_MAGIC[1]
|| e_magic[2] != ELF_MAGIC[2]
|| e_magic[3] != ELF_MAGIC[3]
{
return Err(Error::ElfMagicMismatchError { magic: e_magic });
}
let e_class = FileClass::new(reader.read_u8()?);
let e_encoding = Encoding::new(reader.read_u8()?);
let e_version_ = reader.read_u8()?;
let e_os_abi = OsAbi::new(reader.read_u8()?);
let e_os_abi_version = reader.read_u8()?;
let mut e_padding_: [u8; 7] = [0; 7];
reader.read_exact(&mut e_padding_)?;
let e_type = ObjectType::new(reader.read_u16::<LittleEndian>()?);
let e_machine = reader.read_u16::<LittleEndian>()?;
let e_version = Version::new(reader.read_u32::<LittleEndian>()?);
let e_entry = reader.read_u64::<LittleEndian>()?;
let e_phoff = reader.read_u64::<LittleEndian>()?;
let e_shoff = reader.read_u64::<LittleEndian>()?;
let e_flags = reader.read_u32::<LittleEndian>()?;
let e_ehsize = reader.read_u16::<LittleEndian>()?;
let e_phentsize = reader.read_u16::<LittleEndian>()?;
let e_phnum = reader.read_u16::<LittleEndian>()?;
let e_shentsize = reader.read_u16::<LittleEndian>()?;
let e_shnum = reader.read_u16::<LittleEndian>()?;
let e_shstrndx = reader.read_u16::<LittleEndian>()?;
Ok(ElfFileHeader {
e_magic,
e_class,
e_encoding,
e_version_,
e_os_abi,
e_os_abi_version,
e_padding_,
e_type,
e_machine,
e_version,
e_entry,
e_phoff,
e_shoff,
e_flags,
e_ehsize,
e_phentsize,
e_phnum,
e_shentsize,
e_shnum,
e_shstrndx,
})
}
}
impl FileClass {
fn new(value: u8) -> FileClass {
match value {
0 => FileClass::None,
1 => FileClass::ElfClass32,
2 => FileClass::ElfClass64,
_ => FileClass::Invalid(value),
}
}
}
impl Encoding {
fn new(value: u8) -> Encoding {
match value {
0 => Encoding::None,
1 => Encoding::LittleEndian,
2 => Encoding::BigEndian,
_ => Encoding::Invalid(value),
}
}
}
impl OsAbi {
fn new(value: u8) -> OsAbi {
use OsAbi::*;
match value {
0 => UnixVSystem,
1 => HpUx,
2 => NetBsd,
3 => GnuElfExtensions,
6 => SunSolaris,
7 => IbmAix,
8 => SgiIrix,
9 => FreeBsd,
10 => CompaqTru64Unix,
11 => NovellModesto,
12 => OpenBsd,
64 => ArmEabi,
97 => Arm,
255 => Standalone,
_ => OsAbi::Invalid(value),
}
}
}
impl ObjectType {
fn new(value: u16) -> ObjectType {
use ObjectType::*;
match value {
0 => NoFileType,
1 => RelocatableFile,
2 => ExecutableFile,
3 => SharedObjectFile,
4 => CoreFile,
_ => Invalid(value),
}
}
}
impl Version {
fn new(value: u32) -> Version {
match value {
0 => Version::Unspecified,
1 => Version::Current,
_ => Version::Invalid(value),
}
}
}
impl fmt::Display for ElfFileHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Elf Header:")?;
writeln!(f, "{:<32}{:x?}", "Magic:", self.e_magic)?;
writeln!(f, "{:<32}{:?}", "Class:", self.e_class)?;
writeln!(f, "{:<32}{:?}", "Encoding:", self.e_encoding)?;
writeln!(f, "{:<32}{:?}", "OS/ABI:", self.e_os_abi)?;
writeln!(f, "{:<32}{}", "ABI Version:", self.e_os_abi_version)?;
writeln!(f, "{:<32}{:x?}", "Padding:", self.e_padding_)?;
writeln!(f, "{:<32}{:?}", "Type:", self.e_type)?;
writeln!(f, "{:<32}{}", "Architecture:", show_machine(self.e_machine))?;
writeln!(f, "{:<32}{:?}", "Version:", self.e_version)?;
writeln!(f, "{:<32}{:#x}", "Entry point address:", self.e_entry)?;
writeln!(f, "{:<32}{}", "Program header offset:", self.e_phoff)?;
writeln!(f, "{:<32}{}", "Section header offset:", self.e_shoff)?;
writeln!(f, "{:<32}{}", "Flags:", self.e_flags)?;
writeln!(f, "{:<32}{}", "Size of this header:", self.e_ehsize)?;
writeln!(f, "{:<32}{}", "Size of program headers:", self.e_phentsize)?;
writeln!(f, "{:<32}{}", "Number of program headers:", self.e_phnum)?;
writeln!(f, "{:<32}{}", "Size of section headers:", self.e_shentsize)?;
writeln!(f, "{:<32}{}", "Number of section headers:", self.e_shnum)?;
writeln!(
f,
"{:<32}{}",
"Section header strtab index:", self.e_shstrndx
)
}
}
| Encoding | identifier_name |
mod.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;
mod alloc;
pub mod brotli;
mod mode;
mod stream;
use mode::Flush;
use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
if condition {
Ok(())
} else {
Err(type_error(msg.to_string()))
}
}
#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
state
.resource_table
.get::<Zlib>(handle)
.map_err(|_| bad_resource_id())
}
#[derive(Default)]
struct ZlibInner {
dictionary: Option<Vec<u8>>,
err: i32,
flush: Flush,
init_done: bool,
level: i32,
mem_level: i32,
mode: Mode,
strategy: i32,
window_bits: i32,
write_in_progress: bool,
pending_close: bool,
gzib_id_bytes_read: u32,
strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
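// Added note: 0x1f 0x8b is the standard two-byte gzip magic number; in Mode::Unzip,
// do_write peeks at these bytes to decide between Gunzip and plain Inflate.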
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough input yet to check the second gzip magic byte.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're not done yet, but the output buffer is full
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
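// Lifecycle sketch (added comment): callers obtain a handle from op_zlib_new(mode),
// configure it with op_zlib_init, push data through op_zlib_write / op_zlib_write_async,
// and finally release it via op_zlib_close or op_zlib_close_if_pending.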
#[op]
pub fn | (state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = zlib.strm.avail_out;
result[1] = zlib.strm.avail_in;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_init(
state: &mut OpState,
handle: u32,
level: i32,
window_bits: i32,
mem_level: i32,
strategy: i32,
dictionary: &[u8],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
check((1..=9).contains(&mem_level), "invalid memLevel")?;
check(
strategy == Z_DEFAULT_STRATEGY
|| strategy == Z_FILTERED
|| strategy == Z_HUFFMAN_ONLY
|| strategy == Z_RLE
|| strategy == Z_FIXED,
"invalid strategy",
)?;
zlib.level = level;
zlib.window_bits = window_bits;
zlib.mem_level = mem_level;
zlib.strategy = strategy;
zlib.flush = Flush::None;
zlib.err = Z_OK;
zlib.init_stream()?;
zlib.dictionary = if !dictionary.is_empty() {
Some(dictionary.to_vec())
} else {
None
};
Ok(zlib.err)
}
#[op]
pub fn op_zlib_reset(
state: &mut OpState,
handle: u32,
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.reset_stream()?;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_close_if_pending(
state: &mut OpState,
handle: u32,
) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
zlib.write_in_progress = false;
zlib.pending_close
};
if pending_close {
drop(resource);
state.resource_table.close(handle)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn zlib_start_write() {
// buffer, length, should pass
type WriteVector = (&'static [u8], u32, u32, bool);
const WRITE_VECTORS: [WriteVector; 8] = [
(b"Hello", 5, 0, true),
(b"H", 1, 0, true),
(b"", 0, 0, true),
// Overrun the buffer
(b"H", 5, 0, false),
(b"ello", 5, 0, false),
(b"Hello", 5, 1, false),
(b"H", 1, 1, false),
(b"", 0, 1, false),
];
for (input, len, offset, expected) in WRITE_VECTORS.iter() {
let mut stream = ZlibInner {
mode: Mode::Inflate,
..Default::default()
};
stream.init_stream().unwrap();
assert_eq!(stream.err, Z_OK);
assert_eq!(
stream
.start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
.is_ok(),
*expected
);
assert_eq!(stream.err, Z_OK);
stream.close().unwrap();
}
}
}
| op_zlib_close | identifier_name |
mod.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;
mod alloc;
pub mod brotli;
mod mode;
mod stream;
use mode::Flush;
use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
if condition {
Ok(())
} else {
Err(type_error(msg.to_string()))
}
}
#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
state
.resource_table
.get::<Zlib>(handle)
.map_err(|_| bad_resource_id())
}
#[derive(Default)]
struct ZlibInner {
dictionary: Option<Vec<u8>>,
err: i32,
flush: Flush,
init_done: bool,
level: i32,
mem_level: i32,
mode: Mode,
strategy: i32,
window_bits: i32,
write_in_progress: bool,
pending_close: bool,
gzib_id_bytes_read: u32,
strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're not done yet, but the output buffer is full.
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
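// Editorial note on the Mode::Unzip branch above: auto-detection inspects at
// most the first two input bytes against GZIP_HEADER_ID1/GZIP_HEADER_ID2, so
// for example (illustrative bytes only):
//
//   [0x1f, 0x8b, ...] -> mode switches to Mode::Gunzip
//   [0x78, 0x9c, ...] -> mode switches to Mode::Inflate (plain zlib stream)
//
// Anything whose first byte is not 0x1f falls back to Mode::Inflate; a lone
// 0x1f waits for the second byte before deciding.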
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => |
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = zlib.strm.avail_out;
result[1] = zlib.strm.avail_in;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_init(
state: &mut OpState,
handle: u32,
level: i32,
window_bits: i32,
mem_level: i32,
strategy: i32,
dictionary: &[u8],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
check((1..=9).contains(&mem_level), "invalid memLevel")?;
check(
strategy == Z_DEFAULT_STRATEGY
|| strategy == Z_FILTERED
|| strategy == Z_HUFFMAN_ONLY
|| strategy == Z_RLE
|| strategy == Z_FIXED,
"invalid strategy",
)?;
zlib.level = level;
zlib.window_bits = window_bits;
zlib.mem_level = mem_level;
zlib.strategy = strategy;
zlib.flush = Flush::None;
zlib.err = Z_OK;
zlib.init_stream()?;
zlib.dictionary = if !dictionary.is_empty() {
Some(dictionary.to_vec())
} else {
None
};
Ok(zlib.err)
}
#[op]
pub fn op_zlib_reset(
state: &mut OpState,
handle: u32,
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.reset_stream()?;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_close_if_pending(
state: &mut OpState,
handle: u32,
) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
zlib.write_in_progress = false;
zlib.pending_close
};
if pending_close {
drop(resource);
state.resource_table.close(handle)?;
}
Ok(())
}
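// Rough sketch of the call order these ops expect (editorial addition). The
// calls below ignore the #[op] wrapper and use assumed i32 encodings for
// `Mode`/`Flush`, so read this as pseudocode rather than compilable Rust:
//
//   let handle = op_zlib_new(state, /* Mode::Deflate (assumed value) */ 1)?;
//   op_zlib_init(state, handle, 6, 15, 8, Z_DEFAULT_STRATEGY, &[])?;
//   let mut out = vec![0u8; 16 * 1024];
//   let mut result = [0u32; 2];
//   op_zlib_write(state, handle, /* Flush::Finish (assumed) */ 4,
//                 b"hello", 0, 5, &mut out, 0, out.len() as u32, &mut result)?;
//   op_zlib_close(state, handle)?;
//   op_zlib_close_if_pending(state, handle)?;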
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn zlib_start_write() {
// buffer, length, offset, should pass
type WriteVector = (&'static [u8], u32, u32, bool);
const WRITE_VECTORS: [WriteVector; 8] = [
(b"Hello", 5, 0, true),
(b"H", 1, 0, true),
(b"", 0, 0, true),
// Overrun the buffer
(b"H", 5, 0, false),
(b"ello", 5, 0, false),
(b"Hello", 5, 1, false),
(b"H", 1, 1, false),
(b"", 0, 1, false),
];
for (input, len, offset, expected) in WRITE_VECTORS.iter() {
let mut stream = ZlibInner {
mode: Mode::Inflate,
..Default::default()
};
stream.init_stream().unwrap();
assert_eq!(stream.err, Z_OK);
assert_eq!(
stream
.start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
.is_ok(),
*expected
);
assert_eq!(stream.err, Z_OK);
stream.close().unwrap();
}
}
}
| {} | conditional_block |
mod.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;
mod alloc;
pub mod brotli;
mod mode;
mod stream;
use mode::Flush;
use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
if condition {
Ok(())
} else {
Err(type_error(msg.to_string()))
}
}
#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
state
.resource_table
.get::<Zlib>(handle)
.map_err(|_| bad_resource_id())
}
#[derive(Default)]
struct ZlibInner {
dictionary: Option<Vec<u8>>,
err: i32,
flush: Flush,
init_done: bool,
level: i32,
mem_level: i32,
mode: Mode,
strategy: i32,
window_bits: i32,
write_in_progress: bool,
pending_close: bool,
gzib_id_bytes_read: u32,
strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
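// Editorial note: the offset/length pairs above are validated purely through
// slice `get`, so an out-of-range request becomes a type_error instead of a
// panic. With a 5-byte input, for example:
//
//   start_write(b"Hello", 0, 5, ...) -> Ok   (covers exactly the buffer)
//   start_write(b"Hello", 1, 5, ...) -> Err  (offset 1 + len 5 > 5 bytes)
//   start_write(b"H",     0, 5, ...) -> Err  (len exceeds the buffer)
//
// These cases mirror the WRITE_VECTORS table in the tests at the end of this
// file.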
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're not done yet, but the output buffer is full.
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
}
self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> |
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = zlib.strm.avail_out;
result[1] = zlib.strm.avail_in;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_init(
state: &mut OpState,
handle: u32,
level: i32,
window_bits: i32,
mem_level: i32,
strategy: i32,
dictionary: &[u8],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
check((1..=9).contains(&mem_level), "invalid memLevel")?;
check(
strategy == Z_DEFAULT_STRATEGY
|| strategy == Z_FILTERED
|| strategy == Z_HUFFMAN_ONLY
|| strategy == Z_RLE
|| strategy == Z_FIXED,
"invalid strategy",
)?;
zlib.level = level;
zlib.window_bits = window_bits;
zlib.mem_level = mem_level;
zlib.strategy = strategy;
zlib.flush = Flush::None;
zlib.err = Z_OK;
zlib.init_stream()?;
zlib.dictionary = if !dictionary.is_empty() {
Some(dictionary.to_vec())
} else {
None
};
Ok(zlib.err)
}
#[op]
pub fn op_zlib_reset(
state: &mut OpState,
handle: u32,
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.reset_stream()?;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_close_if_pending(
state: &mut OpState,
handle: u32,
) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
zlib.write_in_progress = false;
zlib.pending_close
};
if pending_close {
drop(resource);
state.resource_table.close(handle)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn zlib_start_write() {
// buffer, length, offset, should pass
type WriteVector = (&'static [u8], u32, u32, bool);
const WRITE_VECTORS: [WriteVector; 8] = [
(b"Hello", 5, 0, true),
(b"H", 1, 0, true),
(b"", 0, 0, true),
// Overrun the buffer
(b"H", 5, 0, false),
(b"ello", 5, 0, false),
(b"Hello", 5, 1, false),
(b"H", 1, 1, false),
(b"", 0, 1, false),
];
for (input, len, offset, expected) in WRITE_VECTORS.iter() {
let mut stream = ZlibInner {
mode: Mode::Inflate,
..Default::default()
};
stream.init_stream().unwrap();
assert_eq!(stream.err, Z_OK);
assert_eq!(
stream
.start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
.is_ok(),
*expected
);
assert_eq!(stream.err, Z_OK);
stream.close().unwrap();
}
}
}
| {
self.err = self.strm.reset(self.mode);
Ok(())
} | identifier_body |
mod.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;
mod alloc;
pub mod brotli;
mod mode;
mod stream;
use mode::Flush;
use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
if condition {
Ok(())
} else {
Err(type_error(msg.to_string()))
}
}
#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
state
.resource_table
.get::<Zlib>(handle)
.map_err(|_| bad_resource_id())
}
#[derive(Default)]
struct ZlibInner {
dictionary: Option<Vec<u8>>,
err: i32,
flush: Flush,
init_done: bool,
level: i32,
mem_level: i32,
mode: Mode,
strategy: i32,
window_bits: i32,
write_in_progress: bool,
pending_close: bool,
gzib_id_bytes_read: u32,
strm: StreamWrapper,
}
const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;
impl ZlibInner {
#[allow(clippy::too_many_arguments)]
fn start_write(
&mut self,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
flush: Flush,
) -> Result<(), AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
self.write_in_progress = true;
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
.ok_or_else(|| type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
.ok_or_else(|| type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
self.strm.next_in = next_in;
self.strm.avail_out = out_len;
self.strm.next_out = next_out;
self.flush = flush;
Ok(())
}
fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
self.err = self.strm.deflate(flush);
}
// Auto-detect mode.
Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
let mut next_expected_header_byte = Some(0);
// SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
// `self.strm.avail_in` is the length of the input buffer that is only set by
// `start_write`.
let strm = unsafe {
std::slice::from_raw_parts(
self.strm.next_in,
self.strm.avail_in as usize,
)
};
if self.gzib_id_bytes_read == 0 {
if strm[0] == GZIP_HEADER_ID1 {
self.gzib_id_bytes_read = 1;
next_expected_header_byte = Some(1);
// Not enough.
if self.strm.avail_in == 1 {
break 'blck;
}
} else {
self.mode = Mode::Inflate;
next_expected_header_byte = None;
}
}
if self.gzib_id_bytes_read == 1 {
let byte = match next_expected_header_byte {
Some(i) => strm[i],
None => break 'blck,
};
if byte == GZIP_HEADER_ID2 {
self.gzib_id_bytes_read = 2;
self.mode = Mode::Gunzip;
} else {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
return Err(type_error(
"invalid number of gzip magic number bytes read",
));
}
}
_ => {}
}
match self.mode {
Mode::Inflate
| Mode::Gunzip
| Mode::InflateRaw
// We're still reading the header.
| Mode::Unzip => {
self.err = self.strm.inflate(self.flush);
// TODO(@littledivy): Use if let chain when it is stable.
// https://github.com/rust-lang/rust/issues/53667
//
// Data was encoded with dictionary
if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
self.err = self.strm.inflate_set_dictionary(dictionary);
if self.err == Z_OK {
self.err = self.strm.inflate(flush);
} else if self.err == Z_DATA_ERROR {
self.err = Z_NEED_DICT;
}
}
while self.strm.avail_in > 0
&& self.mode == Mode::Gunzip
&& self.err == Z_STREAM_END
// SAFETY: `strm` is a valid pointer to zlib strm.
// `strm.next_in` is initialized to the input buffer.
&& unsafe { *self.strm.next_in } != 0x00
{
self.err = self.strm.reset(self.mode);
self.err = self.strm.inflate(flush);
}
}
_ => {}
}
let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
// We're not done yet, but the output buffer is full.
if self.err == Z_BUF_ERROR && !done {
// Set to Z_OK to avoid reporting the error in JS.
self.err = Z_OK;
}
self.write_in_progress = false;
Ok(())
}
fn init_stream(&mut self) -> Result<(), AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
_ => {}
}
self.err = match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
self.level,
self.window_bits,
self.mem_level,
self.strategy,
),
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
Mode::None => return Err(type_error("Unknown mode")),
};
self.write_in_progress = false;
self.init_done = true;
Ok(())
}
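// Editorial note on the windowBits adjustment above, following zlib's
// deflateInit2/inflateInit2 conventions. Starting from the base 8..=15 value
// validated in op_zlib_init:
//
//   Gzip / Gunzip           -> window_bits + 16   (e.g. 15 -> 31, gzip wrapper)
//   Unzip                   -> window_bits + 32   (e.g. 15 -> 47, auto-detect)
//   DeflateRaw / InflateRaw -> -window_bits       (e.g. 15 -> -15, no wrapper)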
fn close(&mut self) -> Result<bool, AnyError> {
if self.write_in_progress { | self.pending_close = false;
check(self.init_done, "close before init")?;
self.strm.end(self.mode);
self.mode = Mode::None;
Ok(true)
}
fn reset_stream(&mut self) -> Result<(), AnyError> {
self.err = self.strm.reset(self.mode);
Ok(())
}
}
struct Zlib {
inner: RefCell<ZlibInner>,
}
impl deno_core::Resource for Zlib {
fn name(&self) -> Cow<str> {
"zlib".into()
}
}
#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
mode,
..Default::default()
};
Ok(state.resource_table.add(Zlib {
inner: RefCell::new(inner),
}))
}
#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
Ok(())
}
#[op]
pub fn op_zlib_write_async(
state: Rc<RefCell<OpState>>,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
) -> Result<
impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
AnyError,
> {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut strm = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
let state = state.clone();
Ok(async move {
let mut state_mut = state.borrow_mut();
let resource = zlib(&mut state_mut, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.do_write(flush)?;
Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
})
}
#[op]
pub fn op_zlib_write(
state: &mut OpState,
handle: u32,
flush: i32,
input: &[u8],
in_off: u32,
in_len: u32,
out: &mut [u8],
out_off: u32,
out_len: u32,
result: &mut [u32],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
zlib.do_write(flush)?;
result[0] = zlib.strm.avail_out;
result[1] = zlib.strm.avail_in;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_init(
state: &mut OpState,
handle: u32,
level: i32,
window_bits: i32,
mem_level: i32,
strategy: i32,
dictionary: &[u8],
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
check((1..=9).contains(&mem_level), "invalid memLevel")?;
check(
strategy == Z_DEFAULT_STRATEGY
|| strategy == Z_FILTERED
|| strategy == Z_HUFFMAN_ONLY
|| strategy == Z_RLE
|| strategy == Z_FIXED,
"invalid strategy",
)?;
zlib.level = level;
zlib.window_bits = window_bits;
zlib.mem_level = mem_level;
zlib.strategy = strategy;
zlib.flush = Flush::None;
zlib.err = Z_OK;
zlib.init_stream()?;
zlib.dictionary = if !dictionary.is_empty() {
Some(dictionary.to_vec())
} else {
None
};
Ok(zlib.err)
}
#[op]
pub fn op_zlib_reset(
state: &mut OpState,
handle: u32,
) -> Result<i32, AnyError> {
let resource = zlib(state, handle)?;
let mut zlib = resource.inner.borrow_mut();
zlib.reset_stream()?;
Ok(zlib.err)
}
#[op]
pub fn op_zlib_close_if_pending(
state: &mut OpState,
handle: u32,
) -> Result<(), AnyError> {
let resource = zlib(state, handle)?;
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
zlib.write_in_progress = false;
zlib.pending_close
};
if pending_close {
drop(resource);
state.resource_table.close(handle)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn zlib_start_write() {
// buffer, length, offset, should pass
type WriteVector = (&'static [u8], u32, u32, bool);
const WRITE_VECTORS: [WriteVector; 8] = [
(b"Hello", 5, 0, true),
(b"H", 1, 0, true),
(b"", 0, 0, true),
// Overrun the buffer
(b"H", 5, 0, false),
(b"ello", 5, 0, false),
(b"Hello", 5, 1, false),
(b"H", 1, 1, false),
(b"", 0, 1, false),
];
for (input, len, offset, expected) in WRITE_VECTORS.iter() {
let mut stream = ZlibInner {
mode: Mode::Inflate,
..Default::default()
};
stream.init_stream().unwrap();
assert_eq!(stream.err, Z_OK);
assert_eq!(
stream
.start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
.is_ok(),
*expected
);
assert_eq!(stream.err, Z_OK);
stream.close().unwrap();
}
}
} | self.pending_close = true;
return Ok(false);
}
| random_line_split |
seq2seq_chatbot_Learning.py | # -*- coding: utf-8 -*-
"""Chatbot learning
The vocab dictionary files generated during training must sit on the same path used when the Cindy UI runs, so that it can generate coherent sentences.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# 태그 단어
PAD = "<PADDING>" # 패딩
STA = "<START>" # 시작
END = "<END>" # 끝
OOV = "<OOV>" # 없는 단어(Out of Vocabulary)
# 태그 인덱스
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# 데이터 타입
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# 한 문장에서 단어 시퀀스의 최대 개수
max_sequences = 30
# 임베딩 벡터 차원
embedding_dim = 100
# LSTM 히든레이어 차원
lstm_hidden_dim = 128
# 정규 표현식 필터
RE_FILTER = re.compile("[.,!?\"':;~()]")
# 챗봇 데이터 로드
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# 데이터 개수
len(question)
# 형태소분석 함수
def pos_tag(sentences):
# KoNLPy 형태소분석기 설정
tagger = Okt()
# 문장 품사 변수 초기화
sentences_pos = []
# 모든 문장 반복
for sentence in sentences:
# 특수기호 제거
sentence = re.sub(RE_FILTER, "", sentence)
#print(sentence)
# 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임
sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
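# Quick illustration of pos_tag (editorial addition). The exact split depends
# on the installed KoNLPy/Okt version, so the output shown is a plausible
# example rather than a guaranteed result:
#
#   pos_tag(["3박4일 놀러가고 싶다!"])
#   # -> ["3박4일 놀러 가고 싶다"]  (punctuation removed, morphemes joined by spaces)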
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split():
words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
# 중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
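# Shape check for convert_text_to_index (editorial addition): every row is
# padded or truncated to max_sequences, so N sentences always become an
# (N, 30) integer array. A minimal sketch using the vocabulary built above:
#
#   idx = convert_text_to_index(["안녕"], word_to_index, ENCODER_INPUT)
#   # idx.shape == (1, 30); unseen words map to the OOV index, the tail is PAD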
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
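# Note (editorial addition): the dense one-hot target above allocates
# len(y_decoder) * max_sequences * len(words) floats. A lighter alternative is
# to keep integer targets and compile with a sparse loss, e.g.:
#
#   model.compile(optimizer='rmsprop',
#                 loss='sparse_categorical_crossentropy',
#                 metrics=['accuracy'])
#
# That change is not applied here; the rest of the script assumes the one-hot
# targets built above.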
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
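# Editorial note: encoder_states == [state_h, state_c], and with
# lstm_hidden_dim = 128 each entry has shape (batch_size, 128). These two
# tensors are the only information handed from the encoder to the decoder:
#
#   states = encoder_model.predict(input_seq)   # defined later in this script
#   # states[0].shape == states[1].shape == (1, 128) for a single question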
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence += vocabulary[OOV_INDEX]
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
return input_seq
# 텍스트 생성
def generate_text(input_seq):
# 입력을 인코더에 넣어 마지막 상태 구함
states = encoder_model.predict(input_seq)
# 목표 시퀀스 초기화
target_seq = np.zeros((1, 1))
# 목표 시퀀스의 첫 번째에 <START> 태그 추가
target_seq[0, 0] = STA_INDEX
# 인덱스 초기화
indexs = []
# 디코더 타임 스텝 반복
while 1:
# 디코더로 현재 타임 스텝 출력 구함
# 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화
decoder_outputs, state_h, state_c = decoder_model.pr | [target_seq] + states)
# 결과의 원핫인코딩 형식을 인덱스로 변환
index = np.argmax(decoder_outputs[0, 0, :])
indexs.append(index)
# 종료 검사
if index == END_INDEX or len(indexs) >= max_sequences:
break
# 목표 시퀀스를 바로 이전의 출력으로 설정
target_seq = np.zeros((1, 1))
target_seq[0, 0] = index
# 디코더의 이전 상태를 다음 디코더 예측에 사용
states = [state_h, state_c]
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
return sentence
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가려고')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
| edict(
| conditional_block |
seq2seq_chatbot_Learning.py | # -*- coding: utf-8 -*-
"""Chatbot learning
The vocab dictionary files generated during training must sit on the same path used when the Cindy UI runs, so that it can generate coherent sentences.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# 태그 단어
PAD = "<PADDING>" # 패딩
STA = "<START>" # 시작
END = "<END>" # 끝
OOV = "<OOV>" # 없는 단어(Out of Vocabulary)
# 태그 인덱스
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# 데이터 타입
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# 한 문장에서 단어 시퀀스의 최대 개수
max_sequences = 30
# 임베딩 벡터 차원
embedding_dim = 100
# LSTM 히든레이어 차원
lstm_hidden_dim = 128
# 정규 표현식 필터
RE_FILTER = re.compile("[.,!?\"':;~()]")
# 챗봇 데이터 로드
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# 데이터 개수
len(question)
# 형태소분석 함수
def pos_tag(sentences):
# KoNLPy 형태소분석기 설정
tagger = Okt()
# 문장 품사 변수 초기화
sentences_pos = []
# 모든 문장 반복
for sentence in sentences:
# 특수기호 제거
sentence = re.sub(RE_FILTER, "", sentence)
#print(sentence)
# 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임
| 중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
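# Editorial sketch of a single inference step with this decoder_model (the
# full greedy loop appears in generate_text below):
#
#   states = encoder_model.predict(input_seq)
#   target_seq = np.zeros((1, 1))
#   target_seq[0, 0] = STA_INDEX
#   out, h, c = decoder_model.predict([target_seq] + states)
#   next_index = np.argmax(out[0, 0, :])   # greedy pick for the first word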
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence += vocabulary[OOV_INDEX]
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
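# Editorial note: this loop keeps no intermediate checkpoints; only the final
# weights are saved further down. If checkpointing is wanted, a standard Keras
# callback could be added (sketch only, not applied here):
#
#   ckpt = tf.keras.callbacks.ModelCheckpoint(
#       './seq2seq/seq2seq_model/ckpt_{epoch:02d}', save_weights_only=True)
#   # model.fit(..., callbacks=[ckpt])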
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
return input_seq
# 텍스트 생성
def generate_text(input_seq):
# 입력을 인코더에 넣어 마지막 상태 구함
states = encoder_model.predict(input_seq)
# 목표 시퀀스 초기화
target_seq = np.zeros((1, 1))
# 목표 시퀀스의 첫 번째에 <START> 태그 추가
target_seq[0, 0] = STA_INDEX
# 인덱스 초기화
indexs = []
# 디코더 타임 스텝 반복
while 1:
# 디코더로 현재 타임 스텝 출력 구함
# 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화
decoder_outputs, state_h, state_c = decoder_model.predict(
[target_seq] + states)
# 결과의 원핫인코딩 형식을 인덱스로 변환
index = np.argmax(decoder_outputs[0, 0, :])
indexs.append(index)
# 종료 검사
if index == END_INDEX or len(indexs) >= max_sequences:
break
# 목표 시퀀스를 바로 이전의 출력으로 설정
target_seq = np.zeros((1, 1))
target_seq[0, 0] = index
# 디코더의 이전 상태를 다음 디코더 예측에 사용
states = [state_h, state_c]
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
return sentence
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가려고')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
| sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split():
words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
# | identifier_body |
seq2seq_chatbot_Learning.py | # -*- coding: utf-8 -*-
"""Chatbot learning
The vocab dictionary files generated during training must sit on the same path used when the Cindy UI runs, so that it can generate coherent sentences.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# 태그 단어
PAD = "<PADDING>" # 패딩
STA = "<START>" # 시작
END = "<END>" # 끝
OOV = "<OOV>" # 없는 단어(Out of Vocabulary)
# 태그 인덱스
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# 데이터 타입
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# 한 문장에서 단어 시퀀스의 최대 개수
max_sequences = 30
# 임베딩 벡터 차원
embedding_dim = 100
# LSTM 히든레이어 차원
lstm_hidden_dim = 128
# 정규 표현식 필터
RE_FILTER = re.compile("[.,!?\"':;~()]")
# 챗봇 데이터 로드
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# 데이터 개수
len(question)
# 형태소분석 함수
def pos_tag(sentences):
# KoNLPy 형태소분석기 설정
tagger = Okt()
# 문장 품사 변수 초기화
sentences_pos = []
# 모든 문장 반복
for sentence in sentences:
# 특수기호 제거
sentence = re.sub(RE_FILTER, "", sentence)
#print(sentence)
# 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임
sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split():
words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
# 중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
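# Editorial sketch: the Cindy UI side is expected to reload these two pickles
# from the same relative path (see the module docstring). Roughly:
#
#   with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'rb') as f:
#       word_to_index = pickle.load(f)
#   with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'rb') as f:
#       index_to_word = pickle.load(f)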
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# 모든 문장에 대해서 반복
for sentence in sentences:
sentence_index = []
# 디코더 입력일 경우 맨 앞에 START 태그 추가
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# 문장의 단어들을 띄어쓰기로 분리
for word in sentence.split():
if vocabulary.get(word) is not None:
# 사전에 있는 단어면 해당 인덱스를 추가
sentence_index.extend([vocabulary[word]])
else:
# 사전에 없는 단어면 OOV 인덱스를 추가
sentence_index.extend([vocabulary[OOV]])
# 최대 길이 검사
if type == DECODER_TARGET:
# 디코더 목표일 경우 맨 뒤에 END 태그 추가
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# 최대 길이에 없는 공간은 패딩 인덱스로 채움
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# 문장의 인덱스 배열을 추가
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim, | dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# 모든 문장에 대해서 반복
for index in indexs:
if index == END_INDEX:
# 종료 인덱스면 중지
break
if vocabulary.get(index) is not None:
# 사전에 있는 인덱스면 해당 단어를 추가
sentence += vocabulary[index]
else:
# 사전에 없는 인덱스면 OOV 단어를 추가
sentence += vocabulary[OOV_INDEX]
# 빈칸 추가
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
return input_seq
# 텍스트 생성
def generate_text(input_seq):
# 입력을 인코더에 넣어 마지막 상태 구함
states = encoder_model.predict(input_seq)
# 목표 시퀀스 초기화
target_seq = np.zeros((1, 1))
# 목표 시퀀스의 첫 번째에 <START> 태그 추가
target_seq[0, 0] = STA_INDEX
# 인덱스 초기화
indexs = []
# 디코더 타임 스텝 반복
while 1:
# 디코더로 현재 타임 스텝 출력 구함
# 처음에는 인코더 상태를, 다음부터 이전 디코더 상태로 초기화
decoder_outputs, state_h, state_c = decoder_model.predict(
[target_seq] + states)
# 결과의 원핫인코딩 형식을 인덱스로 변환
index = np.argmax(decoder_outputs[0, 0, :])
indexs.append(index)
# 종료 검사
if index == END_INDEX or len(indexs) >= max_sequences:
break
# 목표 시퀀스를 바로 이전의 출력으로 설정
target_seq = np.zeros((1, 1))
target_seq[0, 0] = index
# 디코더의 이전 상태를 다음 디코더 예측에 사용
states = [state_h, state_c]
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
return sentence
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# Convert the sentence into indices
input_seq = make_predict_input('3박4일 놀러가려고')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence) | random_line_split |
|
seq2seq_chatbot_Learning.py | # -*- coding: utf-8 -*-
"""Chatbot learning
The vocab dictionary files generated during training must be available at the same path when the Cindy UI is run, so that it can generate coherent sentences.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# Tag tokens
PAD = "<PADDING>" # padding
STA = "<START>" # start
END = "<END>" # end
OOV = "<OOV>" # out-of-vocabulary word
# Tag indices
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# Data types
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# Maximum number of tokens in one sentence
max_sequences = 30
# Embedding vector dimension
embedding_dim = 100
# LSTM hidden layer dimension
lstm_hidden_dim = 128
# Regular expression filter (punctuation to strip)
RE_FILTER = re.compile("[.,!?\"':;~()]")
# Load the chatbot data
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# Number of data samples
len(question)
# Morphological analysis function
def pos_tag(sentences):
# Set up the KoNLPy morphological analyzer
tagger = Okt()
# Initialize the list of analyzed sentences
sentences_pos = []
# Iterate over all sentences
for sentence in sentences:
# Remove special characters
sentence = re.sub(RE_FILTER, "", sentence)
#print(sentence)
# Join the analyzer's morpheme list back into a single space-separated string
sentence = " ".join(tagger.morphs(sentence))
sentences_pos.append(sentence)
return sentences_pos
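# Quick sanity check of what pos_tag() produces: punctuation is stripped and the
# Okt morphemes are re-joined with single spaces. The exact split depends on the
# installed KoNLPy/Okt version, so the output below is only indicative.
example_pos = pos_tag(['3박4일 놀러가고 싶다!'])
print(example_pos) # e.g. ['3박 4일 놀러 가고 싶다']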
# Run morphological analysis
question = pos_tag(question)
answer = pos_tag(answer)
# Merge the question and answer sentences into a single list
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# Build the array of all words
for sentence in sentences:
for word in sentence.split():
words.append(word)
# Remove zero-length words
words = [word for word in words if len(word) > 0]
# Remove duplicate words
words = list(set(words))
# Insert the tag tokens at the front
words[:0] = [PAD, STA, END, OOV]
# Vocabulary size
len(words)
# Build the word/index dictionaries
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
# Save the word/index vocab dictionaries
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# Word -> index
# Used when converting sentences into the index sequences fed to the model
print(dict(list(word_to_index.items())[:20]))
# Index -> word
# Used when converting the model's predicted indices back into sentences
print(dict(list(index_to_word.items())[:20]))
# Convert sentences into index sequences
def convert_text_to_index(sentences, vocabulary, type):
sentences_index = []
# Iterate over all sentences
for sentence in sentences:
sentence_index = []
# For decoder input, prepend the START tag
if type == DECODER_INPUT:
sentence_index.extend([vocabulary[STA]])
# Split the sentence into words on whitespace
for word in sentence.split():
if vocabulary.get(word) is not None:
# If the word is in the vocabulary, append its index
sentence_index.extend([vocabulary[word]])
else:
# If the word is not in the vocabulary, append the OOV index
sentence_index.extend([vocabulary[OOV]])
# Enforce the maximum length
if type == DECODER_TARGET:
# For decoder targets, append the END tag at the end
if len(sentence_index) >= max_sequences:
sentence_index = sentence_index[:max_sequences-1] + [vocabulary[END]]
else:
sentence_index += [vocabulary[END]]
else:
if len(sentence_index) > max_sequences:
sentence_index = sentence_index[:max_sequences]
# Fill the remaining positions up to the maximum length with the padding index
sentence_index += (max_sequences - len(sentence_index)) * [vocabulary[PAD]]
# Append this sentence's index sequence
sentences_index.append(sentence_index)
return np.asarray(sentences_index)
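# The three modes only differ in where the tag tokens go: ENCODER_INPUT pads the
# raw word indices, DECODER_INPUT prepends STA_INDEX, and DECODER_TARGET appends
# END_INDEX before padding with PAD_INDEX up to max_sequences entries, e.g.:
example_target = convert_text_to_index(['하루 가'], word_to_index, DECODER_TARGET)
print(example_target[0][:5]) # e.g. [word idx, word idx, END_INDEX, PAD_INDEX, PAD_INDEX]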
# Convert the encoder input into indices
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# First encoder input (12시 땡)
x_encoder[0]
# Convert the decoder input into indices
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# First decoder input (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# Convert the decoder target into indices
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# First decoder target (하루 가 또 가네요 END)
print(y_decoder[0])
# Initialize the one-hot encoding
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# Convert the decoder target to one-hot encoding
# During training the inputs are indices, but the output is in one-hot format
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# Set the decoder target
y_decoder = one_hot_data
# First decoder target
print(y_decoder[0])
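# After one-hot encoding the decoder target is a 3-D tensor of shape
# (number of samples, max_sequences, vocabulary size): one probability slot per
# vocabulary word for each of the 30 time steps.
print(y_decoder.shape) # e.g. (len(answer), 30, len(words))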
#--------------------------------------------
# Training model: encoder definition
#--------------------------------------------
# Takes the index sequence of the input sentence as input
encoder_inputs = layers.Input(shape=(None,))
# Embedding layer
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# With return_state=True the layer also returns its states
# An LSTM has two states: state_h (hidden state) and state_c (cell state)
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# Bundle the hidden state and cell state together
encoder_states = [state_h, state_c]
#--------------------------------------------
# Training model: decoder definition
#--------------------------------------------
# Takes the index sequence of the target sentence as input
decoder_inputs = layers.Input(shape=(None,))
# Embedding layer
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# Unlike the encoder, return_sequences=True so the output of every time step is returned,
# because the outputs of all time steps are processed by the following Dense() layer
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# Initialize initial_state with the encoder states
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# One node per vocabulary word, so each word index is produced in one-hot form
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# Training model definition
#--------------------------------------------
# Build a functional-API model from the inputs and outputs
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Configure training
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# Inference model: encoder definition
#--------------------------------------------
# The inference encoder reuses the training encoder and exposes its states
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# Inference model: decoder definition
#--------------------------------------------
# Unlike training, inference runs one time step at a time,
# taking the previous decoder states as input and updating them at every step
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# Embedding layer
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM layer
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# Bundle the hidden state and cell state together
decoder_states = [state_h, state_c]
# The Dense layer outputs each word index in one-hot form
decoder_outputs = decoder_dense(decoder_outputs)
# Build the inference decoder model
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
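# Note: the training model and the two inference models share the same layer
# objects (embedding, LSTM and Dense), so the weights learned by `model` during
# training are automatically reused by `encoder_model` and `decoder_model`.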
# Convert a sequence of indices back into a sentence
def convert_index_to_text(indexs, vocabulary):
sentence = ''
# Iterate over every index in the sequence
for index in indexs:
if index == END_INDEX:
# Stop at the end-of-sentence index
break
if vocabulary.get(index) is not None:
# If the index is in the vocabulary, append the corresponding word
sentence += vocabulary[index]
else:
# If the index is not in the vocabulary, append the OOV token instead
sentence += vocabulary[OOV_INDEX]
# Add a space between words
sentence += ' '
return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
# Loop over epochs
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# Print accuracy and loss
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
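# Note: each pass of this outer loop calls model.fit() with epochs=100, so the
# model is trained for 10 * 100 = 1000 epochs in total.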
# Sentence prediction test
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# Convert the one-hot encoded results back into indices
# Take the position of the highest value along axis 1
indexs = np.argmax(results[0], 1)
# Convert the indices into a sentence
sentence = convert_index_to_text(indexs, index_to_word)
# Save the model weights
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# Build the input for prediction
def make_predict_input(sentence):
sentences = []
sentences.append(sentence)
sentences = pos_tag(sentences)
input_seq = convert_text_to_index(sentences, word_to_index, ENCODER_INPUT)
| _seq
# Text generation
def generate_text(input_seq):
# Run the input through the encoder to get its final states
states = encoder_model.predict(input_seq)
# Initialize the target sequence
target_seq = np.zeros((1, 1))
# Put the <START> tag in the first position of the target sequence
target_seq[0, 0] = STA_INDEX
# Initialize the index list
indexs = []
# Loop over decoder time steps
while 1:
# Get the decoder output for the current time step
# The states start as the encoder states, then become the previous decoder states
decoder_outputs, state_h, state_c = decoder_model.predict(
[target_seq] + states)
# Convert the one-hot encoded result into an index
index = np.argmax(decoder_outputs[0, 0, :])
indexs.append(index)
# Check for termination
if index == END_INDEX or len(indexs) >= max_sequences:
break
# Set the target sequence to the output just produced
target_seq = np.zeros((1, 1))
target_seq[0, 0] = index
# Use the decoder's previous states for the next decoder prediction
states = [state_h, state_c]
# Convert the indices into a sentence
sentence = convert_index_to_text(indexs, index_to_word)
return sentence
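# Note: this loop performs greedy decoding - at every step only the single most
# likely word (argmax) is kept and fed back as the next decoder input, until
# <END> is produced or max_sequences words have been generated.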
# Convert the sentence into indices
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# Generate text with the inference models
sentence = generate_text(input_seq)
print(sentence)
# Convert the sentence into indices
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# Generate text with the inference models
sentence = generate_text(input_seq)
print(sentence)
# Convert the sentence into indices
input_seq = make_predict_input('3박4일 놀러가려고')
input_seq
# Generate text with the inference models
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
|
return input | identifier_name |
mod.rs | mod assignments;
mod colors;
mod directory_stack;
mod flow;
/// The various blocks
pub mod flow_control;
mod job;
mod pipe_exec;
mod shell_expand;
mod signals;
pub mod sys;
/// Variables for the shell
pub mod variables;
use self::{
directory_stack::DirectoryStack,
flow_control::{Block, Function, FunctionError, Statement},
pipe_exec::foreground,
sys::NULL_PATH,
variables::Variables,
};
pub use self::{
flow::BlockError,
job::{Job, RefinedJob},
pipe_exec::{
job_control::{BackgroundEvent, BackgroundProcess},
PipelineError,
},
variables::Value,
};
use crate::{
assignments::value_check,
builtins::{BuiltinMap, Status},
expansion::{
pipelines::{PipeType, Pipeline},
Error as ExpansionError,
},
parser::{
lexers::{Key, Primitive},
Error as ParseError,
},
};
use nix::{
sys::signal::{self, SigHandler},
unistd::Pid,
};
use std::{
convert::TryFrom,
fs::File,
mem,
ops::{Deref, DerefMut},
rc::Rc,
sync::{atomic::Ordering, Arc, Mutex},
time::SystemTime,
};
use thiserror::Error;
/// Errors from execution
#[derive(Debug, Error)]
pub enum IonError {
// Parse-time error
/// Parsing failed
#[error("syntax error: {0}")]
InvalidSyntax(#[source] ParseError),
/// Incorrect order of blocks
#[error("block error: {0}")]
StatementFlowError(#[source] BlockError),
// Run time errors
/// Function execution error
#[error("function error: {0}")]
Function(#[source] FunctionError),
/// Failed to run a pipeline
#[error("pipeline execution error: {0}")]
PipelineExecutionError(#[source] PipelineError),
/// Could not properly expand to a pipeline
#[error("expansion error: {0}")]
ExpansionError(#[source] ExpansionError<IonError>),
}
impl From<ParseError> for IonError {
#[must_use]
fn from(cause: ParseError) -> Self { Self::InvalidSyntax(cause) }
}
impl From<FunctionError> for IonError {
#[must_use]
fn from(cause: FunctionError) -> Self { Self::Function(cause) }
}
impl From<BlockError> for IonError {
#[must_use]
fn from(cause: BlockError) -> Self { Self::StatementFlowError(cause) }
}
impl From<PipelineError> for IonError {
#[must_use]
fn from(cause: PipelineError) -> Self { Self::PipelineExecutionError(cause) }
}
impl From<ExpansionError<Self>> for IonError {
#[must_use]
fn from(cause: ExpansionError<Self>) -> Self { Self::ExpansionError(cause) }
}
/// Options for the shell
#[derive(Debug, Clone, Hash, Default)]
pub struct Options {
/// Exit from the shell on the first error.
pub err_exit: bool,
/// Activates the -p option, aka pipefail in bash
pub pipe_fail: bool,
/// Do not execute any commands given to the shell.
pub no_exec: bool,
/// If set, denotes that this shell is running as a background job.
pub grab_tty: bool,
}
/// The shell structure is a megastructure that manages all of the state of the shell throughout
/// the entirety of the
/// program. It is initialized at the beginning of the program, and lives until the end of the
/// program.
pub struct Shell<'a> {
/// Contains a list of built-in commands that were created when the program
/// started.
builtins: BuiltinMap<'a>,
/// Contains the aliases, strings, and array variable maps.
variables: Variables,
/// Contains the current state of flow control parameters.
flow_control: Block,
/// Contains the directory stack parameters.
directory_stack: DirectoryStack,
/// When a command is executed, the final result of that command is stored
/// here.
previous_status: Status,
/// The job ID of the previous command sent to the background.
previous_job: usize,
/// Contains all the options relative to the shell
opts: Options,
/// Contains information on all of the active background processes that are being managed
/// by the shell.
background: Arc<Mutex<Vec<BackgroundProcess>>>,
/// When the `fg` command is run, this will be used to communicate with the specified
/// background process.
foreground_signals: Arc<foreground::Signals>,
// Callbacks
/// Custom callback for each command call
on_command: Option<OnCommandCallback<'a>>,
/// Custom callback before each command call
pre_command: Option<PreCommandCallback<'a>>,
/// Custom callback when a background event occurs
background_event: Option<BackgroundEventCallback>,
// Default std pipes
stdin: Option<File>,
stdout: Option<File>,
stderr: Option<File>,
}
/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s) as
/// a string argument, parses them, and executes them the same as it would if you had executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
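// Usage sketch: an embedder might drive this API roughly as follows (error
// handling elided; the command bytes are just an example):
//
// let mut shell = Shell::new();
// let status = shell.execute_command(&b"echo hello\n"[..]).expect("command failed");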
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items[0].job.args, self))
} else if let Some(Value::Function(function)) =
self.variables.get(&pipeline.items[0].job.args[0]).cloned()
{
function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
} else {
self.execute_pipeline(pipeline).map_err(Into::into)
}?;
if let Some(ref callback) = self.on_command {
if let Ok(elapsed_time) = command_start_time.elapsed() {
callback(self, elapsed_time);
}
}
if self.opts.err_exit && !exit_status.is_success() {
return Err(PipelineError::EarlyExit(exit_status).into());
}
Ok(exit_status)
}
/// Get the pid of the last executed job
#[must_use]
pub const fn previous_job(&self) -> Option<usize> {
if self.previous_job == !0 {
None
} else {
Some(self.previous_job)
}
}
/// Set the callback to call when a background event occurs
pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
self.background_event = callback;
}
/// Get mutable access to the background event callback
#[must_use]
pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
&mut self.background_event
}
/// Set the callback to call before each command
pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
self.pre_command = callback;
}
/// Get mutable access to the pre-command callback
#[must_use]
pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> {
&mut self.pre_command
}
/// Set the callback to call on each command
pub fn set_on_command(&mut self, callback: Option<OnCommandCallback<'a>>) {
self.on_command = callback;
}
/// Get mutable access to the on-command callback
pub fn on_command_mut(&mut self) -> &mut Option<OnCommandCallback<'a>> { &mut self.on_command }
/// Get access to the builtins
#[must_use]
pub const fn builtins(&self) -> &BuiltinMap<'a> { &self.builtins }
/// Get a mutable access to the builtins
///
/// Warning: Previously defined functions will rely on previous versions of the builtins, even
/// if they are redefined. It is strongly advised to avoid mutating the builtins while the shell
/// is running
#[must_use]
pub fn | (&mut self) -> &mut BuiltinMap<'a> { &mut self.builtins }
/// Access to the shell options
#[must_use]
pub const fn opts(&self) -> &Options { &self.opts }
/// Mutable access to the shell options
#[must_use]
pub fn opts_mut(&mut self) -> &mut Options { &mut self.opts }
/// Access to the variables
#[must_use]
pub const fn variables(&self) -> &Variables { &self.variables }
/// Mutable access to the variables
#[must_use]
pub fn variables_mut(&mut self) -> &mut Variables { &mut self.variables }
/// Access to the background jobs
#[must_use]
pub fn background_jobs(&self) -> impl Deref<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Mutable access to the background jobs
pub fn background_jobs_mut(&mut self) -> impl DerefMut<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Get a function if it exists
pub fn get_func<T: AsRef<str>>(&self, f: T) -> Option<Rc<Function>> {
if let Some(Value::Function(function)) = self.variables().get(f.as_ref()) {
Some(function.clone())
} else {
None
}
}
/// Set the last command's return code and/or the code for the error
pub fn set_previous_status(&mut self, status: Status) { self.previous_status = status; }
/// Get the last command's return code and/or the code for the error
#[must_use]
pub const fn previous_status(&self) -> Status { self.previous_status }
fn assign(&mut self, key: &Key<'_>, value: Value<Rc<Function>>) -> Result<(), String> {
match (&key.kind, &value) {
(Primitive::Indexed(ref index_name, ref index_kind), Value::Str(_)) => {
let index = value_check(self, index_name, index_kind)
.map_err(|why| format!("{}: {}", key.name, why))?;
match index {
Value::Str(index) => {
let lhs = self
.variables
.get_mut(key.name)
.ok_or_else(|| "index value does not exist".to_string())?;
match lhs {
Value::HashMap(hmap) => {
let _ = hmap.insert(index, value);
Ok(())
}
Value::BTreeMap(bmap) => {
let _ = bmap.insert(index, value);
Ok(())
}
Value::Array(array) => {
let index_num = index.parse::<usize>().map_err(|_| {
format!("index variable is not a numeric value: `{}`", index)
})?;
if let Some(var) = array.get_mut(index_num) {
*var = value;
}
Ok(())
}
Value::Str(_) => Err("cannot assign to an index of a string".into()),
_ => Ok(()),
}
}
Value::Array(_) => Err("index variable cannot be an array".into()),
Value::HashMap(_) => Err("index variable cannot be a hmap".into()),
Value::BTreeMap(_) => Err("index variable cannot be a bmap".into()),
_ => Ok(()),
}
}
(_, Value::Str(_))
| (_, Value::Array(_))
| (Primitive::HashMap(_), Value::HashMap(_))
| (Primitive::BTreeMap(_), Value::BTreeMap(_)) => {
self.variables.set(key.name, value);
Ok(())
}
_ => Ok(()),
}
}
}
| builtins_mut | identifier_name |
mod.rs | mod assignments;
mod colors;
mod directory_stack;
mod flow;
/// The various blocks
pub mod flow_control;
mod job;
mod pipe_exec;
mod shell_expand;
mod signals;
pub mod sys;
/// Variables for the shell
pub mod variables;
use self::{
directory_stack::DirectoryStack,
flow_control::{Block, Function, FunctionError, Statement},
pipe_exec::foreground,
sys::NULL_PATH,
variables::Variables,
};
pub use self::{
flow::BlockError,
job::{Job, RefinedJob},
pipe_exec::{
job_control::{BackgroundEvent, BackgroundProcess},
PipelineError,
},
variables::Value,
};
use crate::{
assignments::value_check,
builtins::{BuiltinMap, Status},
expansion::{
pipelines::{PipeType, Pipeline},
Error as ExpansionError,
},
parser::{
lexers::{Key, Primitive},
Error as ParseError,
},
};
use nix::{
sys::signal::{self, SigHandler},
unistd::Pid,
};
use std::{
convert::TryFrom,
fs::File,
mem,
ops::{Deref, DerefMut},
rc::Rc,
sync::{atomic::Ordering, Arc, Mutex},
time::SystemTime,
};
use thiserror::Error;
/// Errors from execution
#[derive(Debug, Error)]
pub enum IonError {
// Parse-time error
/// Parsing failed
#[error("syntax error: {0}")]
InvalidSyntax(#[source] ParseError),
/// Incorrect order of blocks
#[error("block error: {0}")]
StatementFlowError(#[source] BlockError),
// Run time errors
/// Function execution error
#[error("function error: {0}")]
Function(#[source] FunctionError),
/// Failed to run a pipeline
#[error("pipeline execution error: {0}")]
PipelineExecutionError(#[source] PipelineError),
/// Could not properly expand to a pipeline
#[error("expansion error: {0}")]
ExpansionError(#[source] ExpansionError<IonError>),
}
impl From<ParseError> for IonError {
#[must_use]
fn from(cause: ParseError) -> Self { Self::InvalidSyntax(cause) }
}
impl From<FunctionError> for IonError {
#[must_use]
fn from(cause: FunctionError) -> Self { Self::Function(cause) }
}
impl From<BlockError> for IonError {
#[must_use]
fn from(cause: BlockError) -> Self { Self::StatementFlowError(cause) }
}
impl From<PipelineError> for IonError {
#[must_use]
fn from(cause: PipelineError) -> Self { Self::PipelineExecutionError(cause) }
}
impl From<ExpansionError<Self>> for IonError {
#[must_use]
fn from(cause: ExpansionError<Self>) -> Self { Self::ExpansionError(cause) }
}
/// Options for the shell
#[derive(Debug, Clone, Hash, Default)]
pub struct Options {
/// Exit from the shell on the first error.
pub err_exit: bool,
/// Activates the -p option, aka pipefail in bash
pub pipe_fail: bool,
/// Do not execute any commands given to the shell.
pub no_exec: bool,
/// If set, denotes that this shell is running as a background job.
pub grab_tty: bool,
}
/// The shell structure is a megastructure that manages all of the state of the shell throughout
/// the entirety of the
/// program. It is initialized at the beginning of the program, and lives until the end of the
/// program.
pub struct Shell<'a> {
/// Contains a list of built-in commands that were created when the program
/// started.
builtins: BuiltinMap<'a>,
/// Contains the aliases, strings, and array variable maps.
variables: Variables,
/// Contains the current state of flow control parameters.
flow_control: Block,
/// Contains the directory stack parameters.
directory_stack: DirectoryStack,
/// When a command is executed, the final result of that command is stored
/// here.
previous_status: Status,
/// The job ID of the previous command sent to the background.
previous_job: usize,
/// Contains all the options relative to the shell
opts: Options,
/// Contains information on all of the active background processes that are being managed
/// by the shell.
background: Arc<Mutex<Vec<BackgroundProcess>>>,
/// When the `fg` command is run, this will be used to communicate with the specified
/// background process.
foreground_signals: Arc<foreground::Signals>,
// Callbacks
/// Custom callback for each command call
on_command: Option<OnCommandCallback<'a>>,
/// Custom callback before each command call
pre_command: Option<PreCommandCallback<'a>>,
/// Custom callback when a background event occurs
background_event: Option<BackgroundEventCallback>,
// Default std pipes
stdin: Option<File>,
stdout: Option<File>,
stderr: Option<File>,
}
/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s) as
/// a string argument, parses them, and executes them the same as it would if you had executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items[0].job.args, self))
} else if let Some(Value::Function(function)) =
self.variables.get(&pipeline.items[0].job.args[0]).cloned()
{
function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
} else {
self.execute_pipeline(pipeline).map_err(Into::into)
}?;
if let Some(ref callback) = self.on_command {
if let Ok(elapsed_time) = command_start_time.elapsed() {
callback(self, elapsed_time);
}
}
if self.opts.err_exit && !exit_status.is_success() {
return Err(PipelineError::EarlyExit(exit_status).into());
}
Ok(exit_status)
}
/// Get the pid of the last executed job
#[must_use]
pub const fn previous_job(&self) -> Option<usize> {
if self.previous_job == !0 {
None
} else {
Some(self.previous_job)
}
}
/// Set the callback to call when a background event occurs
pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
self.background_event = callback;
}
/// Get mutable access to the background event callback
#[must_use]
pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
&mut self.background_event
}
/// Set the callback to call before each command
pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
self.pre_command = callback;
}
/// Get mutable access to the pre-command callback
#[must_use]
pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> |
/// Set the callback to call on each command
pub fn set_on_command(&mut self, callback: Option<OnCommandCallback<'a>>) {
self.on_command = callback;
}
/// Get mutable access to the on-command callback
pub fn on_command_mut(&mut self) -> &mut Option<OnCommandCallback<'a>> { &mut self.on_command }
/// Get access to the builtins
#[must_use]
pub const fn builtins(&self) -> &BuiltinMap<'a> { &self.builtins }
/// Get a mutable access to the builtins
///
/// Warning: Previously defined functions will rely on previous versions of the builtins, even
/// if they are redefined. It is strongly advised to avoid mutating the builtins while the shell
/// is running
#[must_use]
pub fn builtins_mut(&mut self) -> &mut BuiltinMap<'a> { &mut self.builtins }
/// Access to the shell options
#[must_use]
pub const fn opts(&self) -> &Options { &self.opts }
/// Mutable access to the shell options
#[must_use]
pub fn opts_mut(&mut self) -> &mut Options { &mut self.opts }
/// Access to the variables
#[must_use]
pub const fn variables(&self) -> &Variables { &self.variables }
/// Mutable access to the variables
#[must_use]
pub fn variables_mut(&mut self) -> &mut Variables { &mut self.variables }
/// Access to the background jobs
#[must_use]
pub fn background_jobs(&self) -> impl Deref<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Mutable access to the background jobs
pub fn background_jobs_mut(&mut self) -> impl DerefMut<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Get a function if it exists
pub fn get_func<T: AsRef<str>>(&self, f: T) -> Option<Rc<Function>> {
if let Some(Value::Function(function)) = self.variables().get(f.as_ref()) {
Some(function.clone())
} else {
None
}
}
/// Set the last command's return code and/or the code for the error
pub fn set_previous_status(&mut self, status: Status) { self.previous_status = status; }
/// Get the last command's return code and/or the code for the error
#[must_use]
pub const fn previous_status(&self) -> Status { self.previous_status }
fn assign(&mut self, key: &Key<'_>, value: Value<Rc<Function>>) -> Result<(), String> {
match (&key.kind, &value) {
(Primitive::Indexed(ref index_name, ref index_kind), Value::Str(_)) => {
let index = value_check(self, index_name, index_kind)
.map_err(|why| format!("{}: {}", key.name, why))?;
match index {
Value::Str(index) => {
let lhs = self
.variables
.get_mut(key.name)
.ok_or_else(|| "index value does not exist".to_string())?;
match lhs {
Value::HashMap(hmap) => {
let _ = hmap.insert(index, value);
Ok(())
}
Value::BTreeMap(bmap) => {
let _ = bmap.insert(index, value);
Ok(())
}
Value::Array(array) => {
let index_num = index.parse::<usize>().map_err(|_| {
format!("index variable is not a numeric value: `{}`", index)
})?;
if let Some(var) = array.get_mut(index_num) {
*var = value;
}
Ok(())
}
Value::Str(_) => Err("cannot assign to an index of a string".into()),
_ => Ok(()),
}
}
Value::Array(_) => Err("index variable cannot be an array".into()),
Value::HashMap(_) => Err("index variable cannot be a hmap".into()),
Value::BTreeMap(_) => Err("index variable cannot be a bmap".into()),
_ => Ok(()),
}
}
(_, Value::Str(_))
| (_, Value::Array(_))
| (Primitive::HashMap(_), Value::HashMap(_))
| (Primitive::BTreeMap(_), Value::BTreeMap(_)) => {
self.variables.set(key.name, value);
Ok(())
}
_ => Ok(()),
}
}
}
| {
&mut self.pre_command
} | identifier_body |
mod.rs | mod assignments;
mod colors;
mod directory_stack;
mod flow;
/// The various blocks
pub mod flow_control;
mod job;
mod pipe_exec;
mod shell_expand;
mod signals;
pub mod sys;
/// Variables for the shell
pub mod variables;
use self::{
directory_stack::DirectoryStack,
flow_control::{Block, Function, FunctionError, Statement},
pipe_exec::foreground,
sys::NULL_PATH,
variables::Variables,
};
pub use self::{
flow::BlockError,
job::{Job, RefinedJob},
pipe_exec::{
job_control::{BackgroundEvent, BackgroundProcess},
PipelineError,
},
variables::Value,
};
use crate::{
assignments::value_check,
builtins::{BuiltinMap, Status},
expansion::{
pipelines::{PipeType, Pipeline},
Error as ExpansionError,
},
parser::{
lexers::{Key, Primitive},
Error as ParseError,
},
};
use nix::{
sys::signal::{self, SigHandler},
unistd::Pid,
};
use std::{
convert::TryFrom,
fs::File,
mem,
ops::{Deref, DerefMut},
rc::Rc,
sync::{atomic::Ordering, Arc, Mutex},
time::SystemTime,
};
use thiserror::Error;
/// Errors from execution
#[derive(Debug, Error)]
pub enum IonError {
// Parse-time error
/// Parsing failed
#[error("syntax error: {0}")]
InvalidSyntax(#[source] ParseError),
/// Incorrect order of blocks
#[error("block error: {0}")]
StatementFlowError(#[source] BlockError),
// Run time errors
/// Function execution error
#[error("function error: {0}")]
Function(#[source] FunctionError),
/// Failed to run a pipeline
#[error("pipeline execution error: {0}")]
PipelineExecutionError(#[source] PipelineError),
/// Could not properly expand to a pipeline
#[error("expansion error: {0}")]
ExpansionError(#[source] ExpansionError<IonError>),
}
impl From<ParseError> for IonError {
#[must_use]
fn from(cause: ParseError) -> Self { Self::InvalidSyntax(cause) }
}
impl From<FunctionError> for IonError {
#[must_use]
fn from(cause: FunctionError) -> Self { Self::Function(cause) }
}
impl From<BlockError> for IonError {
#[must_use]
fn from(cause: BlockError) -> Self { Self::StatementFlowError(cause) }
}
impl From<PipelineError> for IonError {
#[must_use]
fn from(cause: PipelineError) -> Self { Self::PipelineExecutionError(cause) }
}
impl From<ExpansionError<Self>> for IonError {
#[must_use]
fn from(cause: ExpansionError<Self>) -> Self { Self::ExpansionError(cause) }
}
/// Options for the shell
#[derive(Debug, Clone, Hash, Default)]
pub struct Options {
/// Exit from the shell on the first error.
pub err_exit: bool,
/// Activates the -p option, aka pipefail in bash
pub pipe_fail: bool,
/// Do not execute any commands given to the shell.
pub no_exec: bool,
/// If set, denotes that this shell is running as a background job.
pub grab_tty: bool,
}
/// The shell structure is a megastructure that manages all of the state of the shell throughout
/// the entirety of the
/// program. It is initialized at the beginning of the program, and lives until the end of the
/// program.
pub struct Shell<'a> {
/// Contains a list of built-in commands that were created when the program | flow_control: Block,
/// Contains the directory stack parameters.
directory_stack: DirectoryStack,
/// When a command is executed, the final result of that command is stored
/// here.
previous_status: Status,
/// The job ID of the previous command sent to the background.
previous_job: usize,
/// Contains all the options relative to the shell
opts: Options,
/// Contains information on all of the active background processes that are being managed
/// by the shell.
background: Arc<Mutex<Vec<BackgroundProcess>>>,
/// When the `fg` command is run, this will be used to communicate with the specified
/// background process.
foreground_signals: Arc<foreground::Signals>,
// Callbacks
/// Custom callback for each command call
on_command: Option<OnCommandCallback<'a>>,
/// Custom callback before each command call
pre_command: Option<PreCommandCallback<'a>>,
/// Custom callback when a background event occurs
background_event: Option<BackgroundEventCallback>,
// Default std pipes
stdin: Option<File>,
stdout: Option<File>,
stderr: Option<File>,
}
/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;
impl<'a> Default for Shell<'a> {
#[must_use]
fn default() -> Self { Self::new() }
}
impl<'a> Shell<'a> {
/// Install signal handlers necessary for the shell to work
fn install_signal_handler() {
extern "C" fn handler(signal: i32) {
let signal = signal::Signal::try_from(signal).unwrap();
let signal = match signal {
signal::Signal::SIGINT => signals::SIGINT,
signal::Signal::SIGHUP => signals::SIGHUP,
signal::Signal::SIGTERM => signals::SIGTERM,
_ => unreachable!(),
};
signals::PENDING.store(signal as usize, Ordering::SeqCst);
}
unsafe {
let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
}
}
/// Create a new shell with default settings
#[must_use]
pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }
/// Create a shell with custom builtins
#[must_use]
pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
Self::install_signal_handler();
// This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
// for this shell to manage its own process group / children / etc.
signals::block();
Shell {
builtins,
variables: Variables::default(),
flow_control: Block::with_capacity(5),
directory_stack: DirectoryStack::new(),
previous_job: !0,
previous_status: Status::SUCCESS,
opts: Options::default(),
background: Arc::new(Mutex::new(Vec::new())),
foreground_signals: Arc::new(foreground::Signals::new()),
on_command: None,
pre_command: None,
background_event: None,
stdin: None,
stdout: None,
stderr: None,
}
}
/// Replace the default stdin
pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
mem::replace(&mut self.stdin, stdin.into())
}
/// Replace the default stdout
pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
mem::replace(&mut self.stdout, stdout.into())
}
/// Replace the default stderr
pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
mem::replace(&mut self.stderr, stderr.into())
}
/// Access the directory stack
#[must_use]
pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }
/// Mutable access to the directory stack
#[must_use]
pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }
/// Resets the flow control fields to their default values.
pub fn reset_flow(&mut self) { self.flow_control.clear(); }
/// Exit the current block
pub fn exit_block(&mut self) -> Result<(), BlockError> {
self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
}
/// Get the depth of the current block
#[must_use]
pub fn block_len(&self) -> usize { self.flow_control.len() }
/// A method for executing a function, using `args` as the input.
pub fn execute_function<S: AsRef<str>>(
&mut self,
function: &Rc<Function>,
args: &[S],
) -> Result<Status, IonError> {
function.clone().execute(self, args)?;
Ok(self.previous_status)
}
/// A method for executing commands in the Ion shell without capturing. It takes command(s) as
/// a string argument, parses them, and executes them the same as it would if you had executed
/// the command(s) in the command line REPL interface for Ion. If the supplied command is not
/// terminated, then an error will be returned.
pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
self.on_command(command.bytes().filter_map(Result::ok), true)?;
if let Some(block) = self.flow_control.last().map(Statement::to_string) {
self.previous_status = Status::from_exit_code(1);
Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
} else {
Ok(self.previous_status)
}
}
/// Executes a pipeline and returns the final exit status of the pipeline.
pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
let command_start_time = SystemTime::now();
let mut pipeline = pipeline.expand(self)?;
let null_file =
if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
let (stderr, stdout) = (
null_file.as_ref().or_else(|| self.stderr.as_ref()),
null_file.as_ref().or_else(|| self.stdout.as_ref()),
);
for item in &mut pipeline.items {
item.job.stdin = self
.stdin
.as_ref()
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stdout = stdout
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
item.job.stderr = stderr
.map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
.transpose()?;
}
if let Some(ref callback) = self.pre_command {
callback(self, &pipeline);
}
// Don't execute commands when the `-n` flag is passed.
let exit_status = if self.opts.no_exec {
Ok(Status::SUCCESS)
} else if pipeline.requires_piping()
|| self.stderr.is_some()
|| self.stdin.is_some()
|| self.stdout.is_some()
{
self.execute_pipeline(pipeline).map_err(Into::into)
} else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
Ok(main(&pipeline.items[0].job.args, self))
} else if let Some(Value::Function(function)) =
self.variables.get(&pipeline.items[0].job.args[0]).cloned()
{
function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
} else {
self.execute_pipeline(pipeline).map_err(Into::into)
}?;
if let Some(ref callback) = self.on_command {
if let Ok(elapsed_time) = command_start_time.elapsed() {
callback(self, elapsed_time);
}
}
if self.opts.err_exit && !exit_status.is_success() {
return Err(PipelineError::EarlyExit(exit_status).into());
}
Ok(exit_status)
}
/// Get the pid of the last executed job
#[must_use]
pub const fn previous_job(&self) -> Option<usize> {
if self.previous_job == !0 {
None
} else {
Some(self.previous_job)
}
}
/// Set the callback to call when a background event occurs
pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
self.background_event = callback;
}
/// Get mutable access to the background event callback
#[must_use]
pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
&mut self.background_event
}
/// Set the callback to call before each command
pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
self.pre_command = callback;
}
/// Get mutable access to the pre-command callback
#[must_use]
pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> {
&mut self.pre_command
}
/// Set the callback to call on each command
pub fn set_on_command(&mut self, callback: Option<OnCommandCallback<'a>>) {
self.on_command = callback;
}
/// Get mutable access to the on-command callback
pub fn on_command_mut(&mut self) -> &mut Option<OnCommandCallback<'a>> { &mut self.on_command }
/// Get access to the builtins
#[must_use]
pub const fn builtins(&self) -> &BuiltinMap<'a> { &self.builtins }
/// Get a mutable access to the builtins
///
/// Warning: Previously defined functions will rely on previous versions of the builtins, even
/// if they are redefined. It is strongly advised to avoid mutating the builtins while the shell
/// is running
#[must_use]
pub fn builtins_mut(&mut self) -> &mut BuiltinMap<'a> { &mut self.builtins }
/// Access to the shell options
#[must_use]
pub const fn opts(&self) -> &Options { &self.opts }
/// Mutable access to the shell options
#[must_use]
pub fn opts_mut(&mut self) -> &mut Options { &mut self.opts }
/// Access to the variables
#[must_use]
pub const fn variables(&self) -> &Variables { &self.variables }
/// Mutable access to the variables
#[must_use]
pub fn variables_mut(&mut self) -> &mut Variables { &mut self.variables }
/// Access to the background jobs
#[must_use]
pub fn background_jobs(&self) -> impl Deref<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Mutable access to the background jobs
pub fn background_jobs_mut(&mut self) -> impl DerefMut<Target = Vec<BackgroundProcess>> + '_ {
self.background.lock().expect("Could not lock the mutex")
}
/// Get a function if it exists
pub fn get_func<T: AsRef<str>>(&self, f: T) -> Option<Rc<Function>> {
if let Some(Value::Function(function)) = self.variables().get(f.as_ref()) {
Some(function.clone())
} else {
None
}
}
/// Set the last command's return code and/or the code for the error
pub fn set_previous_status(&mut self, status: Status) { self.previous_status = status; }
/// Get the last command's return code and/or the code for the error
#[must_use]
pub const fn previous_status(&self) -> Status { self.previous_status }
fn assign(&mut self, key: &Key<'_>, value: Value<Rc<Function>>) -> Result<(), String> {
match (&key.kind, &value) {
(Primitive::Indexed(ref index_name, ref index_kind), Value::Str(_)) => {
let index = value_check(self, index_name, index_kind)
.map_err(|why| format!("{}: {}", key.name, why))?;
match index {
Value::Str(index) => {
let lhs = self
.variables
.get_mut(key.name)
.ok_or_else(|| "index value does not exist".to_string())?;
match lhs {
Value::HashMap(hmap) => {
let _ = hmap.insert(index, value);
Ok(())
}
Value::BTreeMap(bmap) => {
let _ = bmap.insert(index, value);
Ok(())
}
Value::Array(array) => {
let index_num = index.parse::<usize>().map_err(|_| {
format!("index variable is not a numeric value: `{}`", index)
})?;
if let Some(var) = array.get_mut(index_num) {
*var = value;
}
Ok(())
}
Value::Str(_) => Err("cannot assign to an index of a string".into()),
_ => Ok(()),
}
}
Value::Array(_) => Err("index variable cannot be an array".into()),
Value::HashMap(_) => Err("index variable cannot be a hmap".into()),
Value::BTreeMap(_) => Err("index variable cannot be a bmap".into()),
_ => Ok(()),
}
}
(_, Value::Str(_))
| (_, Value::Array(_))
| (Primitive::HashMap(_), Value::HashMap(_))
| (Primitive::BTreeMap(_), Value::BTreeMap(_)) => {
self.variables.set(key.name, value);
Ok(())
}
_ => Ok(()),
}
}
} | /// started.
builtins: BuiltinMap<'a>,
/// Contains the aliases, strings, and array variable maps.
variables: Variables,
/// Contains the current state of flow control parameters. | random_line_split |
lib.rs | //! The SGX root enclave
//!
//! ## Authors
//!
//! The Veracruz Development Team.
//!
//! ## Licensing and copyright notice
//!
//! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for
//! information on licensing and copyright.
#![no_std]
#[macro_use]
extern crate sgx_tstd as std;
use lazy_static::lazy_static;
use sgx_tdh::{SgxDhInitiator, SgxDhMsg3};
use sgx_types;
use sgx_types::{
sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t,
sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t, sgx_ra_context_t,
sgx_ra_init, sgx_status_t, sgx_target_info_t,
};
use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}};
use ring::{rand::SystemRandom, signature::EcdsaKeyPair};
use veracruz_utils::csr;
lazy_static! {
static ref SESSION_ID: AtomicU64 = AtomicU64::new(1);
static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> =
std::sync::SgxMutex::new(HashMap::new());
static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> = std::sync::SgxMutex::new(None);
static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> = std::sync::SgxMutex::new(None);
}
pub enum SgxRootEnclave {
Success = 0x00,
Msg3RawError = 0x01,
ProcMsg3Error = 0x02,
CsrVerifyFail = 0x03,
CsrToCertFail = 0x04,
LockFail = 0x05,
HashError = 0x06,
PKCS8Error = 0x07,
StateError = 0x08,
PrivateKeyNotPopulated = 0x09,
}
#[no_mangle]
pub extern "C" fn get_firmware_version_len(p_fwv_len: &mut usize) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
*p_fwv_len = version.len();
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice.clone_from_slice(&version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
            // ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?;
*private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes);
}
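// Note: the PKCS#8 document is generated for `ECDSA_P256_SHA256_FIXED_SIGNING` but is later
// re-imported with `ECDSA_P256_SHA256_ASN1_SIGNING`; both algorithms use the same P-256 key
// material and differ only in the signature encoding they produce, so the same key works for both.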
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
    // place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
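// The offsets above index directly into the DER encoding of the CSR; they presumably match the
// exact layout produced by `csr::generate_csr` with the Veracruz templates and will not hold for
// arbitrary CSRs.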
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave { |
fn from_slice(bytes: &[u8]) -> [u8; 32] {
let mut array = [0; 32];
let bytes = &bytes[..array.len()]; // panics if not enough data
for index in 0..32 {
array[index] = bytes[index];
}
array
}
|
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh_msg3.is_none());
let dh_msg3 = match dh_msg3 {
Some(msg) => msg,
None => {
return SgxRootEnclave::Msg3RawError;
}
};
let mut initiator = {
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
initiator_hash.remove(&sgx_root_enclave_session_id).unwrap()
};
let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this
let mut responder_identity = sgx_dh_session_enclave_identity_t::default();
let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity);
if status.is_err() {
return SgxRootEnclave::ProcMsg3Error;
}
// now that the msg3 is authenticated, we can generate the cert from the csr
let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) };
match verify_csr(&csr_slice) {
Ok(status) => match status {
true => (), // Do nothing
false => {
println!("CSR Did not verify successfully");
return SgxRootEnclave::CsrVerifyFail;
},
},
Err(err) => {
println!("CSR did not verify:{:?}. Returning error", err);
return SgxRootEnclave::CsrVerifyFail;
},
}
//generate cert from csr, signed by PRIVATE_KEY
let private_key = {
let private_key_vec = match get_private_key() {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PKCS8Error,
}
};
let mut compute_enclave_cert = match csr::convert_csr_to_cert(&csr_slice, &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE, &responder_identity.mr_enclave.m, &private_key) {
Ok(bytes) => bytes,
Err(err) => {
println!("Failed to convert csr to cert:{:?}", err);
return SgxRootEnclave::CsrToCertFail;
},
};
let (mut root_enclave_cert, mut root_cert) = {
let cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some((re_cert, r_cert)) => {
(re_cert.clone(), r_cert.clone())
}
None => {
panic!("CERT_CHAIN is not populated");
},
}
};
if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) {
assert!(false);
}
let cert_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_cert_buf, compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) };
unsafe { *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len() };
let cert_lengths_slice = unsafe { std::slice::from_raw_parts_mut(cert_lengths, cert_lengths_size/std::mem::size_of::<u32>()) };
// create a buffer to aggregate the certificates
let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new();
let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new();
// add the compute_enclave_cert to the return buffer
temp_cert_lengths.push(compute_enclave_cert.len() as u32);
temp_cert_buf.append(&mut compute_enclave_cert);
// add the root_enclave cert to the temp buffer
temp_cert_lengths.push(root_enclave_cert.len() as u32);
temp_cert_buf.append(&mut root_enclave_cert);
// add the root cert to the temp buffer
temp_cert_lengths.push(root_cert.len() as u32);
temp_cert_buf.append(&mut root_cert);
// Copy the temporary certificate buffer contents to the destination buffer
cert_buf_slice.clone_from_slice(&temp_cert_buf);
cert_lengths_slice.clone_from_slice(&temp_cert_lengths);
return SgxRootEnclave::Success;
}
| identifier_body |
lib.rs | //! The SGX root enclave
//!
//! ## Authors
//!
//! The Veracruz Development Team.
//!
//! ## Licensing and copyright notice
//!
//! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for
//! information on licensing and copyright.
#![no_std]
#[macro_use]
extern crate sgx_tstd as std;
use lazy_static::lazy_static;
use sgx_tdh::{SgxDhInitiator, SgxDhMsg3};
use sgx_types;
use sgx_types::{
sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t,
sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t, sgx_ra_context_t,
sgx_ra_init, sgx_status_t, sgx_target_info_t,
};
use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}};
use ring::{rand::SystemRandom, signature::EcdsaKeyPair};
use veracruz_utils::csr;
lazy_static! {
static ref SESSION_ID: AtomicU64 = AtomicU64::new(1);
static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> =
std::sync::SgxMutex::new(HashMap::new());
static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> = std::sync::SgxMutex::new(None);
static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> = std::sync::SgxMutex::new(None);
}
pub enum SgxRootEnclave {
Success = 0x00,
Msg3RawError = 0x01,
ProcMsg3Error = 0x02,
CsrVerifyFail = 0x03,
CsrToCertFail = 0x04,
LockFail = 0x05,
HashError = 0x06,
PKCS8Error = 0x07,
StateError = 0x08,
PrivateKeyNotPopulated = 0x09,
}
#[no_mangle]
pub extern "C" fn g | p_fwv_len: &mut usize) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
*p_fwv_len = version.len();
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice.clone_from_slice(&version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
            // ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?;
*private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes);
}
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
    // place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave {
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh_msg3.is_none());
let dh_msg3 = match dh_msg3 {
Some(msg) => msg,
None => {
return SgxRootEnclave::Msg3RawError;
}
};
let mut initiator = {
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
initiator_hash.remove(&sgx_root_enclave_session_id).unwrap()
};
let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this
let mut responder_identity = sgx_dh_session_enclave_identity_t::default();
let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity);
if status.is_err() {
return SgxRootEnclave::ProcMsg3Error;
}
// now that the msg3 is authenticated, we can generate the cert from the csr
let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) };
match verify_csr(&csr_slice) {
Ok(status) => match status {
true => (), // Do nothing
false => {
println!("CSR Did not verify successfully");
return SgxRootEnclave::CsrVerifyFail;
},
},
Err(err) => {
println!("CSR did not verify:{:?}. Returning error", err);
return SgxRootEnclave::CsrVerifyFail;
},
}
//generate cert from csr, signed by PRIVATE_KEY
let private_key = {
let private_key_vec = match get_private_key() {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PKCS8Error,
}
};
let mut compute_enclave_cert = match csr::convert_csr_to_cert(&csr_slice, &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE, &responder_identity.mr_enclave.m, &private_key) {
Ok(bytes) => bytes,
Err(err) => {
println!("Failed to convert csr to cert:{:?}", err);
return SgxRootEnclave::CsrToCertFail;
},
};
let (mut root_enclave_cert, mut root_cert) = {
let cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some((re_cert, r_cert)) => {
(re_cert.clone(), r_cert.clone())
}
None => {
panic!("CERT_CHAIN is not populated");
},
}
};
if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) {
assert!(false);
}
let cert_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_cert_buf, compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) };
unsafe { *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len() };
let cert_lengths_slice = unsafe { std::slice::from_raw_parts_mut(cert_lengths, cert_lengths_size/std::mem::size_of::<u32>()) };
// create a buffer to aggregate the certificates
let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new();
let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new();
// add the compute_enclave_cert to the return buffer
temp_cert_lengths.push(compute_enclave_cert.len() as u32);
temp_cert_buf.append(&mut compute_enclave_cert);
// add the root_enclave cert to the temp buffer
temp_cert_lengths.push(root_enclave_cert.len() as u32);
temp_cert_buf.append(&mut root_enclave_cert);
// add the root cert to the temp buffer
temp_cert_lengths.push(root_cert.len() as u32);
temp_cert_buf.append(&mut root_cert);
// Copy the temporary certificate buffer contents to the destination buffer
cert_buf_slice.clone_from_slice(&temp_cert_buf);
cert_lengths_slice.clone_from_slice(&temp_cert_lengths);
return SgxRootEnclave::Success;
}
fn from_slice(bytes: &[u8]) -> [u8; 32] {
let mut array = [0; 32];
let bytes = &bytes[..array.len()]; // panics if not enough data
for index in 0..32 {
array[index] = bytes[index];
}
array
}
| et_firmware_version_len( | identifier_name |
lib.rs | //! The SGX root enclave
//!
//! ## Authors
//!
//! The Veracruz Development Team.
//!
//! ## Licensing and copyright notice
//!
//! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for
//! information on licensing and copyright.
#![no_std]
#[macro_use]
extern crate sgx_tstd as std;
use lazy_static::lazy_static;
use sgx_tdh::{SgxDhInitiator, SgxDhMsg3};
use sgx_types;
use sgx_types::{
sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t,
sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t, sgx_ra_context_t,
sgx_ra_init, sgx_status_t, sgx_target_info_t,
};
use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}};
use ring::{rand::SystemRandom, signature::EcdsaKeyPair};
use veracruz_utils::csr;
lazy_static! {
static ref SESSION_ID: AtomicU64 = AtomicU64::new(1);
static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> =
std::sync::SgxMutex::new(HashMap::new());
static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> = std::sync::SgxMutex::new(None);
static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> = std::sync::SgxMutex::new(None);
}
pub enum SgxRootEnclave {
Success = 0x00,
Msg3RawError = 0x01,
ProcMsg3Error = 0x02,
CsrVerifyFail = 0x03,
CsrToCertFail = 0x04,
LockFail = 0x05,
HashError = 0x06,
PKCS8Error = 0x07,
StateError = 0x08,
PrivateKeyNotPopulated = 0x09,
}
#[no_mangle]
pub extern "C" fn get_firmware_version_len(p_fwv_len: &mut usize) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
*p_fwv_len = version.len();
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn get_firmware_version(
p_firmware_version_buf: *mut u8,
fv_buf_size: usize,
) -> sgx_status_t {
let version = env!("CARGO_PKG_VERSION");
assert!(version.len() <= fv_buf_size);
let version_buf_slice =
unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
version_buf_slice.clone_from_slice(&version.as_bytes());
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
pub_key_buf: *const u8,
pub_key_size: usize,
p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
assert!(pub_key_size != 0);
assert!(!pub_key_buf.is_null());
let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };
let pub_key = sgx_ec256_public_t {
gx: from_slice(&pub_key_vec[0..32]),
gy: from_slice(&pub_key_vec[32..64]),
};
let mut context: sgx_ra_context_t = 0;
assert!(pub_key_vec.len() > 0);
let ret = unsafe {
sgx_ra_init(
&pub_key as *const sgx_ec256_public_t,
0,
&mut context as *mut sgx_ra_context_t,
)
};
if ret != sgx_status_t::SGX_SUCCESS {
return ret;
}
unsafe {
*p_context = context;
}
return ret;
}
/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
let mut private_key_guard = match PRIVATE_KEY.lock() {
Err(_) => return Err(SgxRootEnclave::LockFail),
Ok(guard) => guard,
};
let pkcs8_bytes = match &*private_key_guard {
Some(bytes) => {
bytes.clone()
}
None => {
            // ECDSA P-256 (prime256v1) key generation.
let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
&ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
&SystemRandom::new(),)
.map_err(|_| SgxRootEnclave::PKCS8Error)?; | }
#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
p_pubkey_challenge: *const u8,
pubkey_challenge_size: usize,
p_target_info: *const sgx_target_info_t,
report: *mut sgx_types::sgx_report_t,
csr_buffer: *mut u8,
csr_buf_size: usize,
p_csr_size: *mut usize,
) -> sgx_status_t {
let pubkey_challenge_vec =
unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
let mut report_data = sgx_types::sgx_report_data_t::default();
// place the challenge in the report
report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);
let private_key_ring = {
let private_key_vec = match get_private_key() {
Ok(vec) => vec,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(pkr) => pkr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
}
};
// generate the certificate signing request
let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
Ok(csr) => csr,
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
};
    // place the hash of the csr in the report
let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());
let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);
// place the csr where it needs to be
if csr_vec.len() > csr_buf_size {
assert!(false);
} else {
let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
csr_buf_slice.clone_from_slice(&csr_vec);
unsafe { *p_csr_size = csr_vec.len() };
}
sgx_status_t::SGX_SUCCESS
}
#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
root_cert: *const u8,
root_cert_size: usize,
enclave_cert: *const u8,
enclave_cert_size: usize,
) -> sgx_status_t {
let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };
let mut cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some(_) => {
panic!("Unhandled. CERT_CHAIN is not None.");
}
None => {
*cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
}
}
return sgx_status_t::SGX_SUCCESS;
}
#[no_mangle]
pub extern "C" fn start_local_attest_enc(
msg1: &sgx_dh_msg1_t,
msg2: &mut sgx_dh_msg2_t,
sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
let mut initiator = SgxDhInitiator::init_session();
let status = initiator.proc_msg1(msg1, msg2);
assert!(!status.is_err());
let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);
{
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
Ok(guard) => guard,
};
initiator_hash.insert(session_id, initiator);
}
*sgx_root_enclave_session_id = session_id;
sgx_status_t::SGX_SUCCESS
}
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);
fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes);
let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
let csr_signature = &csr[237..];
let verify_result = public_key.verify(&csr_body, &csr_signature);
if verify_result.is_err() {
return Err(format!("verify_csr failed:{:?}", verify_result));
} else {
return Ok(true);
}
}
#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
dh_msg3_raw: &mut sgx_dh_msg3_t,
csr: *const u8,
csr_size: usize,
sgx_root_enclave_session_id: u64,
p_cert_buf: *mut u8,
cert_buf_size: usize,
p_cert_size: *mut usize,
cert_lengths: *mut u32,
cert_lengths_size: usize,
) -> SgxRootEnclave {
let dh_msg3_raw_len =
mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
assert!(!dh_msg3.is_none());
let dh_msg3 = match dh_msg3 {
Some(msg) => msg,
None => {
return SgxRootEnclave::Msg3RawError;
}
};
let mut initiator = {
let mut initiator_hash = match INITIATOR_HASH.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
initiator_hash.remove(&sgx_root_enclave_session_id).unwrap()
};
let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this
let mut responder_identity = sgx_dh_session_enclave_identity_t::default();
let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity);
if status.is_err() {
return SgxRootEnclave::ProcMsg3Error;
}
// now that the msg3 is authenticated, we can generate the cert from the csr
let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) };
match verify_csr(&csr_slice) {
Ok(status) => match status {
true => (), // Do nothing
false => {
println!("CSR Did not verify successfully");
return SgxRootEnclave::CsrVerifyFail;
},
},
Err(err) => {
println!("CSR did not verify:{:?}. Returning error", err);
return SgxRootEnclave::CsrVerifyFail;
},
}
//generate cert from csr, signed by PRIVATE_KEY
let private_key = {
let private_key_vec = match get_private_key() {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated,
};
match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) {
Ok(key) => key,
Err(_) => return SgxRootEnclave::PKCS8Error,
}
};
let mut compute_enclave_cert = match csr::convert_csr_to_cert(&csr_slice, &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE, &responder_identity.mr_enclave.m, &private_key) {
Ok(bytes) => bytes,
Err(err) => {
println!("Failed to convert csr to cert:{:?}", err);
return SgxRootEnclave::CsrToCertFail;
},
};
let (mut root_enclave_cert, mut root_cert) = {
let cert_chain_guard = match CERT_CHAIN.lock() {
Err(_) => return SgxRootEnclave::LockFail,
Ok(guard) => guard,
};
match &*cert_chain_guard {
Some((re_cert, r_cert)) => {
(re_cert.clone(), r_cert.clone())
}
None => {
panic!("CERT_CHAIN is not populated");
},
}
};
if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) {
assert!(false);
}
let cert_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_cert_buf, compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) };
unsafe { *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len() };
let cert_lengths_slice = unsafe { std::slice::from_raw_parts_mut(cert_lengths, cert_lengths_size/std::mem::size_of::<u32>()) };
// create a buffer to aggregate the certificates
let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new();
let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new();
// add the compute_enclave_cert to the return buffer
temp_cert_lengths.push(compute_enclave_cert.len() as u32);
temp_cert_buf.append(&mut compute_enclave_cert);
// add the root_enclave cert to the temp buffer
temp_cert_lengths.push(root_enclave_cert.len() as u32);
temp_cert_buf.append(&mut root_enclave_cert);
// add the root cert to the temp buffer
temp_cert_lengths.push(root_cert.len() as u32);
temp_cert_buf.append(&mut root_cert);
// Copy the temporary certificate buffer contents to the destination buffer
cert_buf_slice.clone_from_slice(&temp_cert_buf);
cert_lengths_slice.clone_from_slice(&temp_cert_lengths);
return SgxRootEnclave::Success;
}
fn from_slice(bytes: &[u8]) -> [u8; 32] {
let mut array = [0; 32];
let bytes = &bytes[..array.len()]; // panics if not enough data
for index in 0..32 {
array[index] = bytes[index];
}
array
} | *private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
pkcs8_bytes.as_ref().to_vec()
}
};
return Ok(pkcs8_bytes); | random_line_split |
config.pb.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go.
// source: config.proto
// DO NOT EDIT!
/*
Package config is a generated protocol buffer package.
It is generated from these files:
config.proto
It has these top-level messages:
TestGroup
Dashboard
LinkTemplate
LinkOptionsTemplate
DashboardTab
Configuration
DefaultConfiguration
*/
package config
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestGroup_TestsName int32
const (
TestGroup_TESTS_NAME_MIN TestGroup_TestsName = 0
TestGroup_TESTS_NAME_IGNORE TestGroup_TestsName = 1
TestGroup_TESTS_NAME_REPLACE TestGroup_TestsName = 2
TestGroup_TESTS_NAME_APPEND TestGroup_TestsName = 3
)
var TestGroup_TestsName_name = map[int32]string{
0: "TESTS_NAME_MIN",
1: "TESTS_NAME_IGNORE",
2: "TESTS_NAME_REPLACE",
3: "TESTS_NAME_APPEND",
}
var TestGroup_TestsName_value = map[string]int32{
"TESTS_NAME_MIN": 0,
"TESTS_NAME_IGNORE": 1,
"TESTS_NAME_REPLACE": 2,
"TESTS_NAME_APPEND": 3,
}
func (x TestGroup_TestsName) String() string {
return proto.EnumName(TestGroup_TestsName_name, int32(x))
}
func (TestGroup_TestsName) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a group of tests to gather.
type TestGroup struct {
// Name of this TestGroup, for mapping dashboard tabs to tests.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// Path to the test result stored in gcs
GcsPrefix string `protobuf:"bytes,2,opt,name=gcs_prefix,json=gcsPrefix" yaml:"gcs_prefix,omitempty"`
// Number of days of test results to gather and serve.
DaysOfResults int32 `protobuf:"varint,3,opt,name=days_of_results,json=daysOfResults" yaml:"days_of_results,omitempty"`
// What to do with the 'Tests name' configuration value. It can replace the
// name of the test, be appended to the name of the test, or be ignored. If it is
// ignored, then the name of the tests will be the build target.
TestsNamePolicy TestGroup_TestsName `protobuf:"varint,6,opt,name=tests_name_policy,json=testsNamePolicy,enum=TestGroup_TestsName" yaml:"tests_name_policy,omitempty"`
ColumnHeader []*TestGroup_ColumnHeader `protobuf:"bytes,9,rep,name=column_header,json=columnHeader" yaml:"column_header,omitempty"`
// deprecated - always set to true
UseKubernetesClient bool `protobuf:"varint,24,opt,name=use_kubernetes_client,json=useKubernetesClient" yaml:"use_kubernetes_client,omitempty"`
// deprecated - always set to true
IsExternal bool `protobuf:"varint,25,opt,name=is_external,json=isExternal" yaml:"is_external,omitempty"`
}
func (m *TestGroup) Reset() { *m = TestGroup{} }
func (m *TestGroup) String() string { return proto.CompactTextString(m) }
func (*TestGroup) ProtoMessage() {}
func (*TestGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestGroup) GetColumnHeader() []*TestGroup_ColumnHeader {
if m != nil {
return m.ColumnHeader
}
return nil
}
// Custom column headers for defining extra column-heading rows from values in
// the test result.
type TestGroup_ColumnHeader struct {
ConfigurationValue string `protobuf:"bytes,3,opt,name=configuration_value,json=configurationValue" yaml:"configuration_value,omitempty"`
}
func (m *TestGroup_ColumnHeader) Reset() { *m = TestGroup_ColumnHeader{} }
func (m *TestGroup_ColumnHeader) String() string { return proto.CompactTextString(m) }
func (*TestGroup_ColumnHeader) ProtoMessage() {}
func (*TestGroup_ColumnHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a dashboard.
type Dashboard struct {
// A list of the tabs on the dashboard.
DashboardTab []*DashboardTab `protobuf:"bytes,1,rep,name=dashboard_tab,json=dashboardTab" yaml:"dashboard_tab,omitempty"`
// A name for the Dashboard.
Name string `protobuf:"bytes,2,opt,name=name" yaml:"name,omitempty"`
}
func (m *Dashboard) Reset() { *m = Dashboard{} }
func (m *Dashboard) String() string { return proto.CompactTextString(m) }
func (*Dashboard) ProtoMessage() {}
func (*Dashboard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Dashboard) GetDashboardTab() []*DashboardTab {
if m != nil {
return m.DashboardTab
}
return nil
}
type LinkTemplate struct {
// The URL template.
Url string `protobuf:"bytes,1,opt,name=url" yaml:"url,omitempty"`
// The options templates.
Options []*LinkOptionsTemplate `protobuf:"bytes,2,rep,name=options" yaml:"options,omitempty"`
}
func (m *LinkTemplate) Reset() { *m = LinkTemplate{} }
func (m *LinkTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkTemplate) ProtoMessage() {}
func (*LinkTemplate) | () ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *LinkTemplate) GetOptions() []*LinkOptionsTemplate {
if m != nil {
return m.Options
}
return nil
}
// A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil
}
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil {
return m.Dashboards
}
return nil
}
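// Illustrative usage sketch (not part of the generated code): it builds a minimal
// Configuration with one test group and one dashboard and serializes it with
// proto.Marshal. The type and field names come from the messages defined above;
// the function name and all literal values ("ci-kubernetes", the gcs path, etc.)
// are made up for the example.
func buildExampleConfiguration() ([]byte, error) {
	cfg := &Configuration{
		TestGroups: []*TestGroup{{
			Name:          "ci-kubernetes",
			GcsPrefix:     "gs://kubernetes-jenkins/logs/ci-kubernetes",
			DaysOfResults: 14,
		}},
		Dashboards: []*Dashboard{{
			Name: "ci",
			DashboardTab: []*DashboardTab{{
				Name:          "build",
				TestGroupName: "ci-kubernetes",
			}},
		}},
	}
	// proto.Marshal produces the binary wire-format bytes for storage or transport.
	return proto.Marshal(cfg)
}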
type DefaultConfiguration struct {
// A default testgroup with default initialization data
DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup" yaml:"default_test_group,omitempty"`
// A default dashboard with default initialization data
DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab" yaml:"default_dashboard_tab,omitempty"`
}
func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} }
func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) }
func (*DefaultConfiguration) ProtoMessage() {}
func (*DefaultConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup {
if m != nil {
return m.DefaultTestGroup
}
return nil
}
func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab {
if m != nil {
return m.DefaultDashboardTab
}
return nil
}
func init() {
proto.RegisterType((*TestGroup)(nil), "TestGroup")
proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader")
proto.RegisterType((*Dashboard)(nil), "Dashboard")
proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate")
proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate")
proto.RegisterType((*DashboardTab)(nil), "DashboardTab")
proto.RegisterType((*Configuration)(nil), "Configuration")
proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration")
proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value)
}
func init() { proto.RegisterFile("config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x54, 0x5d, 0x8f, 0xd2, 0x40,
0x14, 0xb5, 0xb0, 0x5f, 0x5c, 0xca, 0x6e, 0x19, 0x40, 0xab, 0x89, 0x11, 0x6b, 0xb2, 0x21, 0x9a,
0x60, 0x82, 0x2f, 0x1a, 0xdd, 0xac, 0x08, 0xb8, 0x6e, 0xdc, 0x65, 0x49, 0x41, 0x5f, 0x27, 0x43,
0x19, 0xa0, 0xa1, 0xb4, 0x4d, 0x67, 0x6a, 0xe0, 0x77, 0xf8, 0x13, 0xfd, 0x1b, 0x3e, 0x98, 0x4e,
0xbf, 0xa6, 0x86, 0xb7, 0x99, 0x73, 0xcf, 0xbd, 0x77, 0xe6, 0xcc, 0xb9, 0x03, 0xaa, 0xe5, 0xb9,
0x4b, 0x7b, 0xd5, 0xf5, 0x03, 0x8f, 0x7b, 0xc6, 0x9f, 0x32, 0x54, 0x66, 0x94, 0xf1, 0x9b, 0xc0,
0x0b, 0x7d, 0x84, 0xe0, 0xc8, 0x25, 0x5b, 0xaa, 0x2b, 0x6d, 0xa5, 0x53, 0x31, 0xc5, 0x1a, 0x3d,
0x07, 0x58, 0x59, 0x0c, 0xfb, 0x01, 0x5d, 0xda, 0x3b, 0xbd, 0x24, 0x22, 0x95, 0x95, 0xc5, 0x26,
0x02, 0x40, 0x97, 0x70, 0xb1, 0x20, 0x7b, 0x86, 0xbd, 0x25, 0x0e, 0x28, 0x0b, 0x1d, 0xce, 0xf4,
0x72, 0x5b, 0xe9, 0x1c, 0x9b, 0xb5, 0x08, 0x7e, 0x58, 0x9a, 0x31, 0x88, 0x3e, 0x43, 0x9d, 0x53,
0xc6, 0x19, 0x8e, 0x8a, 0x62, 0xdf, 0x73, 0x6c, 0x6b, 0xaf, 0x9f, 0xb4, 0x95, 0xce, 0x79, 0xaf,
0xd9, 0xcd, 0x4e, 0x20, 0x56, 0x6c, 0x4c, 0xb6, 0xd4, 0xbc, 0xe0, 0xe9, 0x72, 0x22, 0xc8, 0xe8,
0x13, 0xd4, 0x2c, 0xcf, 0x09, 0xb7, 0x2e, 0x5e, 0x53, 0xb2, 0xa0, 0x81, 0x5e, 0x69, 0x97, 0x3b,
0xd5, 0xde, 0x13, 0x29, 0x7b, 0x20, 0xe2, 0xdf, 0x44, 0xd8, 0x54, 0x2d, 0x69, 0x87, 0x7a, 0xd0,
0x0a, 0x19, 0xc5, 0x9b, 0x70, 0x4e, 0x03, 0x97, 0x72, 0xca, 0xb0, 0xe5, 0xd8, 0xd4, 0xe5, 0xba,
0xde, 0x56, 0x3a, 0x67, 0x66, 0x23, 0x64, 0xf4, 0x7b, 0x16, 0x1b, 0x88, 0x10, 0x7a, 0x01, 0x55,
0x9b, 0x61, 0xba, 0xe3, 0x34, 0x70, 0x89, 0xa3, 0x3f, 0x15, 0x4c, 0xb0, 0xd9, 0x28, 0x41, 0x9e,
0x5d, 0x83, 0x2a, 0xb7, 0x44, 0x6f, 0xa1, 0x11, 0xab, 0x1b, 0x06, 0x84, 0xdb, 0x9e, 0x8b, 0x7f,
0x11, 0x27, 0xa4, 0x42, 0x90, 0x8a, 0x89, 0x0a, 0xa1, 0x9f, 0x51, 0xc4, 0xa0, 0xb1, 0xfa, 0xe2,
0x9a, 0x08, 0xc1, 0xf9, 0x6c, 0x34, 0x9d, 0x4d, 0xf1, 0xb8, 0x7f, 0x3f, 0xc2, 0xf7, 0xb7, 0x63,
0xed, 0x11, 0x6a, 0x41, 0x5d, 0xc2, 0x6e, 0x6f, 0xc6, 0x0f, 0xe6, 0x48, 0x53, 0xd0, 0x63, 0x40,
0x12, 0x6c, 0x8e, 0x26, 0x77, 0xfd, 0xc1, 0x48, 0x2b, 0xfd, 0x47, 0xef, 0x4f, 0x26, 0xa3, 0xf1,
0x50, 0x2b, 0x1b, 0x53, 0xa8, 0x0c, 0x09, 0x5b, 0xcf, 0x3d, 0x12, 0x2c, 0x50, 0x0f, 0x6a, 0x8b,
0x74, 0x83, 0x39, 0x99, 0xeb, 0x8a, 0xd0, 0xb1, 0xd6, 0xcd, 0x28, 0x33, 0x32, 0x37, 0xd5, 0x85,
0xb4, 0xcb, 0x8c, 0x51, 0xca, 0x8d, 0x61, 0x4c, 0x40, 0xbd, 0xb3, 0xdd, 0xcd, 0x8c, 0x6e, 0x7d,
0x87, 0x70, 0x8a, 0x34, 0x28, 0x87, 0x81, 0x93, 0x78, 0x27, 0x5a, 0xa2, 0x2e, 0x9c, 0x7a, 0x7e,
0x74, 0x59, 0xa6, 0x97, 0x44, 0x8f, 0x66, 0x37, 0xca, 0x78, 0x88, 0xb1, 0x34, 0xd1, 0x4c, 0x49,
0xc6, 0x15, 0x34, 0x0e, 0xc4, 0xa3, 0xc2, 0x1b, 0xba, 0x4f, 0x0b, 0x6f, 0xe8, 0x1e, 0x35, 0xe1,
0x38, 0x56, 0x36, 0x3e, 0x4f, 0xbc, 0x31, 0xfe, 0x96, 0x41, 0x1d, 0x1e, 0x3a, 0xb5, 0x6c, 0xe7,
0x4b, 0x10, 0xc6, 0xc2, 0xab, 0xc8, 0x30, 0x58, 0xba, 0x54, 0x8d, 0xa7, 0x36, 0x12, 0x8f, 0xf1,
0x0a, 0x6a, 0xf3, 0x70, 0x85, 0x2d, 0x6f, 0xeb, 0x7b, 0x6e, 0xe4, 0x93, 0xd8, 0xd5, 0xea, 0x3c,
0x5c, 0x0d, 0x52, 0x0c, 0x75, 0x40, 0xb3, 0xbc, 0x05, 0xc5, 0x8c, 0x92, 0xc0, 0x5a, 0x63, 0x9f,
0xf0, 0xb5, 0x7e, 0x24, 0xaa, 0x9d, 0x47, 0xf8, 0x54, 0xc0, 0x13, 0xc2, 0xd7, 0xe8, 0x23, 0x20,
0xcf, 0xa7, 0x2e, 0x16, 0xbd, 0x79, 0x72, 0x33, 0xfd, 0xb4, 0xad, 0x08, 0xe5, 0x65, 0x1d, 0x4d,
0x2d, 0x22, 0x46, 0xae, 0xc8, 0x04, 0xf8, 0x00, 0xf5, 0xa5, 0xed, 0x50, 0x1c, 0x1d, 0x28, 0xcb,
0x3d, 0x3b, 0x94, 0x7b, 0x11, 0xf1, 0xbe, 0x84, 0xab, 0x2c, 0xf5, 0x0a, 0x1a, 0x84, 0x73, 0x62,
0xad, 0x8b, 0xc9, 0x95, 0x43, 0xc9, 0xf5, 0x98, 0x29, 0xa7, 0xbf, 0x04, 0x35, 0x99, 0x6a, 0xcc,
0xe9, 0x8e, 0xeb, 0x20, 0x2e, 0x57, 0x4d, 0xb0, 0x19, 0xdd, 0x71, 0x74, 0x0d, 0xcd, 0x94, 0x12,
0x06, 0x4e, 0xde, 0xa2, 0x7a, 0xa8, 0x05, 0x4a, 0xa8, 0x3f, 0x02, 0x27, 0xeb, 0xf1, 0x15, 0x74,
0x59, 0xc4, 0x42, 0x11, 0xf5, 0x50, 0x91, 0x56, 0xae, 0xad, 0x54, 0xc7, 0x58, 0x43, 0x6d, 0x20,
0x4f, 0x18, 0x7a, 0x03, 0xd5, 0xfc, 0xa9, 0x59, 0x62, 0x73, 0xc8, 0xbf, 0x0b, 0x13, 0xb2, 0x27,
0x67, 0xe8, 0x35, 0x40, 0xe6, 0xf8, 0xd4, 0xae, 0x90, 0x8f, 0x84, 0x29, 0x45, 0x8d, 0xdf, 0x0a,
0x34, 0x87, 0x74, 0x49, 0x42, 0x87, 0x17, 0x3b, 0xbe, 0x07, 0xb4, 0x88, 0x71, 0x9c, 0x77, 0x16,
0xf6, 0x2b, 0x36, 0xd6, 0x12, 0x56, 0xfe, 0xf3, 0xf6, 0xa1, 0x95, 0x66, 0x16, 0x87, 0xb3, 0x94,
0x28, 0x50, 0x18, 0xce, 0x46, 0xc2, 0x95, 0xc1, 0xf9, 0x89, 0xf8, 0xd1, 0xdf, 0xfd, 0x0b, 0x00,
0x00, 0xff, 0xff, 0x3e, 0xdd, 0x76, 0xfa, 0xe1, 0x05, 0x00, 0x00,
}
| Descriptor | identifier_name |
config.pb.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go.
// source: config.proto
// DO NOT EDIT!
/*
Package config is a generated protocol buffer package.
It is generated from these files:
config.proto
It has these top-level messages:
TestGroup
Dashboard
LinkTemplate
LinkOptionsTemplate
DashboardTab
Configuration
DefaultConfiguration
*/
package config
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestGroup_TestsName int32
const (
TestGroup_TESTS_NAME_MIN TestGroup_TestsName = 0
TestGroup_TESTS_NAME_IGNORE TestGroup_TestsName = 1
TestGroup_TESTS_NAME_REPLACE TestGroup_TestsName = 2
TestGroup_TESTS_NAME_APPEND TestGroup_TestsName = 3
)
var TestGroup_TestsName_name = map[int32]string{
0: "TESTS_NAME_MIN",
1: "TESTS_NAME_IGNORE",
2: "TESTS_NAME_REPLACE",
3: "TESTS_NAME_APPEND",
}
var TestGroup_TestsName_value = map[string]int32{
"TESTS_NAME_MIN": 0,
"TESTS_NAME_IGNORE": 1,
"TESTS_NAME_REPLACE": 2,
"TESTS_NAME_APPEND": 3,
}
func (x TestGroup_TestsName) String() string |
func (TestGroup_TestsName) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a group of tests to gather.
type TestGroup struct {
// Name of this TestGroup, for mapping dashboard tabs to tests.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// Path to the test result stored in gcs
GcsPrefix string `protobuf:"bytes,2,opt,name=gcs_prefix,json=gcsPrefix" yaml:"gcs_prefix,omitempty"`
// Number of days of test results to gather and serve.
DaysOfResults int32 `protobuf:"varint,3,opt,name=days_of_results,json=daysOfResults" yaml:"days_of_results,omitempty"`
// What to do with the 'Tests name' configuration value. It can replace the
// name of the test, be appended to the name of the test, or be ignored. If it is
// ignored, then the name of the tests will be the build target.
TestsNamePolicy TestGroup_TestsName `protobuf:"varint,6,opt,name=tests_name_policy,json=testsNamePolicy,enum=TestGroup_TestsName" yaml:"tests_name_policy,omitempty"`
ColumnHeader []*TestGroup_ColumnHeader `protobuf:"bytes,9,rep,name=column_header,json=columnHeader" yaml:"column_header,omitempty"`
// deprecated - always set to true
UseKubernetesClient bool `protobuf:"varint,24,opt,name=use_kubernetes_client,json=useKubernetesClient" yaml:"use_kubernetes_client,omitempty"`
// deprecated - always set to true
IsExternal bool `protobuf:"varint,25,opt,name=is_external,json=isExternal" yaml:"is_external,omitempty"`
}
func (m *TestGroup) Reset() { *m = TestGroup{} }
func (m *TestGroup) String() string { return proto.CompactTextString(m) }
func (*TestGroup) ProtoMessage() {}
func (*TestGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestGroup) GetColumnHeader() []*TestGroup_ColumnHeader {
if m != nil {
return m.ColumnHeader
}
return nil
}
// Custom column headers for defining extra column-heading rows from values in
// the test result.
type TestGroup_ColumnHeader struct {
ConfigurationValue string `protobuf:"bytes,3,opt,name=configuration_value,json=configurationValue" yaml:"configuration_value,omitempty"`
}
func (m *TestGroup_ColumnHeader) Reset() { *m = TestGroup_ColumnHeader{} }
func (m *TestGroup_ColumnHeader) String() string { return proto.CompactTextString(m) }
func (*TestGroup_ColumnHeader) ProtoMessage() {}
func (*TestGroup_ColumnHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// Specifies a dashboard.
type Dashboard struct {
// A list of the tabs on the dashboard.
DashboardTab []*DashboardTab `protobuf:"bytes,1,rep,name=dashboard_tab,json=dashboardTab" yaml:"dashboard_tab,omitempty"`
// A name for the Dashboard.
Name string `protobuf:"bytes,2,opt,name=name" yaml:"name,omitempty"`
}
func (m *Dashboard) Reset() { *m = Dashboard{} }
func (m *Dashboard) String() string { return proto.CompactTextString(m) }
func (*Dashboard) ProtoMessage() {}
func (*Dashboard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Dashboard) GetDashboardTab() []*DashboardTab {
if m != nil {
return m.DashboardTab
}
return nil
}
type LinkTemplate struct {
// The URL template.
Url string `protobuf:"bytes,1,opt,name=url" yaml:"url,omitempty"`
// The options templates.
Options []*LinkOptionsTemplate `protobuf:"bytes,2,rep,name=options" yaml:"options,omitempty"`
}
func (m *LinkTemplate) Reset() { *m = LinkTemplate{} }
func (m *LinkTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkTemplate) ProtoMessage() {}
func (*LinkTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *LinkTemplate) GetOptions() []*LinkOptionsTemplate {
if m != nil {
return m.Options
}
return nil
}
// A simple key/value pair for link options.
type LinkOptionsTemplate struct {
// The key for the option. This is not expanded.
Key string `protobuf:"bytes,1,opt,name=key" yaml:"key,omitempty"`
// The value for the option. This is expanded the same as the LinkTemplate.
Value string `protobuf:"bytes,2,opt,name=value" yaml:"value,omitempty"`
}
func (m *LinkOptionsTemplate) Reset() { *m = LinkOptionsTemplate{} }
func (m *LinkOptionsTemplate) String() string { return proto.CompactTextString(m) }
func (*LinkOptionsTemplate) ProtoMessage() {}
func (*LinkOptionsTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// A single tab on a dashboard.
type DashboardTab struct {
// The name of the dashboard tab to display in the client.
Name string `protobuf:"bytes,1,opt,name=name" yaml:"name,omitempty"`
// The name of the TestGroup specifying the test results for this tab.
TestGroupName string `protobuf:"bytes,2,opt,name=test_group_name,json=testGroupName" yaml:"test_group_name,omitempty"`
// Default bug component for manually filing bugs from the dashboard
BugComponent int32 `protobuf:"varint,3,opt,name=bug_component,json=bugComponent" yaml:"bug_component,omitempty"`
// Default code search path for changelist search links
CodeSearchPath string `protobuf:"bytes,4,opt,name=code_search_path,json=codeSearchPath" yaml:"code_search_path,omitempty"`
// The URL template to visit after clicking on a cell.
OpenTestTemplate *LinkTemplate `protobuf:"bytes,7,opt,name=open_test_template,json=openTestTemplate" yaml:"open_test_template,omitempty"`
// The URL template to visit when filing a bug.
FileBugTemplate *LinkTemplate `protobuf:"bytes,8,opt,name=file_bug_template,json=fileBugTemplate" yaml:"file_bug_template,omitempty"`
// The URL template to visit when attaching a bug
AttachBugTemplate *LinkTemplate `protobuf:"bytes,9,opt,name=attach_bug_template,json=attachBugTemplate" yaml:"attach_bug_template,omitempty"`
// Text to show in the about menu as a link to another view of the results.
ResultsText string `protobuf:"bytes,10,opt,name=results_text,json=resultsText" yaml:"results_text,omitempty"`
// The URL template to visit after clicking.
ResultsUrlTemplate *LinkTemplate `protobuf:"bytes,11,opt,name=results_url_template,json=resultsUrlTemplate" yaml:"results_url_template,omitempty"`
// The URL template to visit when searching for changelists.
CodeSearchUrlTemplate *LinkTemplate `protobuf:"bytes,12,opt,name=code_search_url_template,json=codeSearchUrlTemplate" yaml:"code_search_url_template,omitempty"`
}
func (m *DashboardTab) Reset() { *m = DashboardTab{} }
func (m *DashboardTab) String() string { return proto.CompactTextString(m) }
func (*DashboardTab) ProtoMessage() {}
func (*DashboardTab) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DashboardTab) GetOpenTestTemplate() *LinkTemplate {
if m != nil {
return m.OpenTestTemplate
}
return nil
}
func (m *DashboardTab) GetFileBugTemplate() *LinkTemplate {
if m != nil {
return m.FileBugTemplate
}
return nil
}
func (m *DashboardTab) GetAttachBugTemplate() *LinkTemplate {
if m != nil {
return m.AttachBugTemplate
}
return nil
}
func (m *DashboardTab) GetResultsUrlTemplate() *LinkTemplate {
if m != nil {
return m.ResultsUrlTemplate
}
return nil
}
func (m *DashboardTab) GetCodeSearchUrlTemplate() *LinkTemplate {
if m != nil {
return m.CodeSearchUrlTemplate
}
return nil
}
// A service configuration consisting of multiple test groups and dashboards.
type Configuration struct {
// A list of groups of tests to gather.
TestGroups []*TestGroup `protobuf:"bytes,1,rep,name=test_groups,json=testGroups" yaml:"test_groups,omitempty"`
// A list of all of the dashboards for a server.
Dashboards []*Dashboard `protobuf:"bytes,2,rep,name=dashboards" yaml:"dashboards,omitempty"`
}
func (m *Configuration) Reset() { *m = Configuration{} }
func (m *Configuration) String() string { return proto.CompactTextString(m) }
func (*Configuration) ProtoMessage() {}
func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Configuration) GetTestGroups() []*TestGroup {
if m != nil {
return m.TestGroups
}
return nil
}
func (m *Configuration) GetDashboards() []*Dashboard {
if m != nil {
return m.Dashboards
}
return nil
}
type DefaultConfiguration struct {
// A default testgroup with default initialization data
DefaultTestGroup *TestGroup `protobuf:"bytes,1,opt,name=default_test_group,json=defaultTestGroup" yaml:"default_test_group,omitempty"`
// A default dashboard with default initialization data
DefaultDashboardTab *DashboardTab `protobuf:"bytes,2,opt,name=default_dashboard_tab,json=defaultDashboardTab" yaml:"default_dashboard_tab,omitempty"`
}
func (m *DefaultConfiguration) Reset() { *m = DefaultConfiguration{} }
func (m *DefaultConfiguration) String() string { return proto.CompactTextString(m) }
func (*DefaultConfiguration) ProtoMessage() {}
func (*DefaultConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *DefaultConfiguration) GetDefaultTestGroup() *TestGroup {
if m != nil {
return m.DefaultTestGroup
}
return nil
}
func (m *DefaultConfiguration) GetDefaultDashboardTab() *DashboardTab {
if m != nil {
return m.DefaultDashboardTab
}
return nil
}
func init() {
proto.RegisterType((*TestGroup)(nil), "TestGroup")
proto.RegisterType((*TestGroup_ColumnHeader)(nil), "TestGroup.ColumnHeader")
proto.RegisterType((*Dashboard)(nil), "Dashboard")
proto.RegisterType((*LinkTemplate)(nil), "LinkTemplate")
proto.RegisterType((*LinkOptionsTemplate)(nil), "LinkOptionsTemplate")
proto.RegisterType((*DashboardTab)(nil), "DashboardTab")
proto.RegisterType((*Configuration)(nil), "Configuration")
proto.RegisterType((*DefaultConfiguration)(nil), "DefaultConfiguration")
proto.RegisterEnum("TestGroup_TestsName", TestGroup_TestsName_name, TestGroup_TestsName_value)
}
func init() { proto.RegisterFile("config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 731 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x54, 0x5d, 0x8f, 0xd2, 0x40,
0x14, 0xb5, 0xb0, 0x5f, 0x5c, 0xca, 0x6e, 0x19, 0x40, 0xab, 0x89, 0x11, 0x6b, 0xb2, 0x21, 0x9a,
0x60, 0x82, 0x2f, 0x1a, 0xdd, 0xac, 0x08, 0xb8, 0x6e, 0xdc, 0x65, 0x49, 0x41, 0x5f, 0x27, 0x43,
0x19, 0xa0, 0xa1, 0xb4, 0x4d, 0x67, 0x6a, 0xe0, 0x77, 0xf8, 0x13, 0xfd, 0x1b, 0x3e, 0x98, 0x4e,
0xbf, 0xa6, 0x86, 0xb7, 0x99, 0x73, 0xcf, 0xbd, 0x77, 0xe6, 0xcc, 0xb9, 0x03, 0xaa, 0xe5, 0xb9,
0x4b, 0x7b, 0xd5, 0xf5, 0x03, 0x8f, 0x7b, 0xc6, 0x9f, 0x32, 0x54, 0x66, 0x94, 0xf1, 0x9b, 0xc0,
0x0b, 0x7d, 0x84, 0xe0, 0xc8, 0x25, 0x5b, 0xaa, 0x2b, 0x6d, 0xa5, 0x53, 0x31, 0xc5, 0x1a, 0x3d,
0x07, 0x58, 0x59, 0x0c, 0xfb, 0x01, 0x5d, 0xda, 0x3b, 0xbd, 0x24, 0x22, 0x95, 0x95, 0xc5, 0x26,
0x02, 0x40, 0x97, 0x70, 0xb1, 0x20, 0x7b, 0x86, 0xbd, 0x25, 0x0e, 0x28, 0x0b, 0x1d, 0xce, 0xf4,
0x72, 0x5b, 0xe9, 0x1c, 0x9b, 0xb5, 0x08, 0x7e, 0x58, 0x9a, 0x31, 0x88, 0x3e, 0x43, 0x9d, 0x53,
0xc6, 0x19, 0x8e, 0x8a, 0x62, 0xdf, 0x73, 0x6c, 0x6b, 0xaf, 0x9f, 0xb4, 0x95, 0xce, 0x79, 0xaf,
0xd9, 0xcd, 0x4e, 0x20, 0x56, 0x6c, 0x4c, 0xb6, 0xd4, 0xbc, 0xe0, 0xe9, 0x72, 0x22, 0xc8, 0xe8,
0x13, 0xd4, 0x2c, 0xcf, 0x09, 0xb7, 0x2e, 0x5e, 0x53, 0xb2, 0xa0, 0x81, 0x5e, 0x69, 0x97, 0x3b,
0xd5, 0xde, 0x13, 0x29, 0x7b, 0x20, 0xe2, 0xdf, 0x44, 0xd8, 0x54, 0x2d, 0x69, 0x87, 0x7a, 0xd0,
0x0a, 0x19, 0xc5, 0x9b, 0x70, 0x4e, 0x03, 0x97, 0x72, 0xca, 0xb0, 0xe5, 0xd8, 0xd4, 0xe5, 0xba,
0xde, 0x56, 0x3a, 0x67, 0x66, 0x23, 0x64, 0xf4, 0x7b, 0x16, 0x1b, 0x88, 0x10, 0x7a, 0x01, 0x55,
0x9b, 0x61, 0xba, 0xe3, 0x34, 0x70, 0x89, 0xa3, 0x3f, 0x15, 0x4c, 0xb0, 0xd9, 0x28, 0x41, 0x9e,
0x5d, 0x83, 0x2a, 0xb7, 0x44, 0x6f, 0xa1, 0x11, 0xab, 0x1b, 0x06, 0x84, 0xdb, 0x9e, 0x8b, 0x7f,
0x11, 0x27, 0xa4, 0x42, 0x90, 0x8a, 0x89, 0x0a, 0xa1, 0x9f, 0x51, 0xc4, 0xa0, 0xb1, 0xfa, 0xe2,
0x9a, 0x08, 0xc1, 0xf9, 0x6c, 0x34, 0x9d, 0x4d, 0xf1, 0xb8, 0x7f, 0x3f, 0xc2, 0xf7, 0xb7, 0x63,
0xed, 0x11, 0x6a, 0x41, 0x5d, 0xc2, 0x6e, 0x6f, 0xc6, 0x0f, 0xe6, 0x48, 0x53, 0xd0, 0x63, 0x40,
0x12, 0x6c, 0x8e, 0x26, 0x77, 0xfd, 0xc1, 0x48, 0x2b, 0xfd, 0x47, 0xef, 0x4f, 0x26, 0xa3, 0xf1,
0x50, 0x2b, 0x1b, 0x53, 0xa8, 0x0c, 0x09, 0x5b, 0xcf, 0x3d, 0x12, 0x2c, 0x50, 0x0f, 0x6a, 0x8b,
0x74, 0x83, 0x39, 0x99, 0xeb, 0x8a, 0xd0, 0xb1, 0xd6, 0xcd, 0x28, 0x33, 0x32, 0x37, 0xd5, 0x85,
0xb4, 0xcb, 0x8c, 0x51, 0xca, 0x8d, 0x61, 0x4c, 0x40, 0xbd, 0xb3, 0xdd, 0xcd, 0x8c, 0x6e, 0x7d,
0x87, 0x70, 0x8a, 0x34, 0x28, 0x87, 0x81, 0x93, 0x78, 0x27, 0x5a, 0xa2, 0x2e, 0x9c, 0x7a, 0x7e,
0x74, 0x59, 0xa6, 0x97, 0x44, 0x8f, 0x66, 0x37, 0xca, 0x78, 0x88, 0xb1, 0x34, 0xd1, 0x4c, 0x49,
0xc6, 0x15, 0x34, 0x0e, 0xc4, 0xa3, 0xc2, 0x1b, 0xba, 0x4f, 0x0b, 0x6f, 0xe8, 0x1e, 0x35, 0xe1,
0x38, 0x56, 0x36, 0x3e, 0x4f, 0xbc, 0x31, 0xfe, 0x96, 0x41, 0x1d, 0x1e, 0x3a, 0xb5, 0x6c, 0xe7,
0x4b, 0x10, 0xc6, 0xc2, 0xab, 0xc8, 0x30, 0x58, 0xba, 0x54, 0x8d, 0xa7, 0x36, 0x12, 0x8f, 0xf1,
0x0a, 0x6a, 0xf3, 0x70, 0x85, 0x2d, 0x6f, 0xeb, 0x7b, 0x6e, 0xe4, 0x93, 0xd8, 0xd5, 0xea, 0x3c,
0x5c, 0x0d, 0x52, 0x0c, 0x75, 0x40, 0xb3, 0xbc, 0x05, 0xc5, 0x8c, 0x92, 0xc0, 0x5a, 0x63, 0x9f,
0xf0, 0xb5, 0x7e, 0x24, 0xaa, 0x9d, 0x47, 0xf8, 0x54, 0xc0, 0x13, 0xc2, 0xd7, 0xe8, 0x23, 0x20,
0xcf, 0xa7, 0x2e, 0x16, 0xbd, 0x79, 0x72, 0x33, 0xfd, 0xb4, 0xad, 0x08, 0xe5, 0x65, 0x1d, 0x4d,
0x2d, 0x22, 0x46, 0xae, 0xc8, 0x04, 0xf8, 0x00, 0xf5, 0xa5, 0xed, 0x50, 0x1c, 0x1d, 0x28, 0xcb,
0x3d, 0x3b, 0x94, 0x7b, 0x11, 0xf1, 0xbe, 0x84, 0xab, 0x2c, 0xf5, 0x0a, 0x1a, 0x84, 0x73, 0x62,
0xad, 0x8b, 0xc9, 0x95, 0x43, 0xc9, 0xf5, 0x98, 0x29, 0xa7, 0xbf, 0x04, 0x35, 0x99, 0x6a, 0xcc,
0xe9, 0x8e, 0xeb, 0x20, 0x2e, 0x57, 0x4d, 0xb0, 0x19, 0xdd, 0x71, 0x74, 0x0d, 0xcd, 0x94, 0x12,
0x06, 0x4e, 0xde, 0xa2, 0x7a, 0xa8, 0x05, 0x4a, 0xa8, 0x3f, 0x02, 0x27, 0xeb, 0xf1, 0x15, 0x74,
0x59, 0xc4, 0x42, 0x11, 0xf5, 0x50, 0x91, 0x56, 0xae, 0xad, 0x54, 0xc7, 0x58, 0x43, 0x6d, 0x20,
0x4f, 0x18, 0x7a, 0x03, 0xd5, 0xfc, 0xa9, 0x59, 0x62, 0x73, 0xc8, 0xbf, 0x0b, 0x13, 0xb2, 0x27,
0x67, 0xe8, 0x35, 0x40, 0xe6, 0xf8, 0xd4, 0xae, 0x90, 0x8f, 0x84, 0x29, 0x45, 0x8d, 0xdf, 0x0a,
0x34, 0x87, 0x74, 0x49, 0x42, 0x87, 0x17, 0x3b, 0xbe, 0x07, 0xb4, 0x88, 0x71, 0x9c, 0x77, 0x16,
0xf6, 0x2b, 0x36, 0xd6, 0x12, 0x56, 0xfe, 0xf3, 0xf6, 0xa1, 0x95, 0x66, 0x16, 0x87, 0xb3, 0x94,
0x28, 0x50, 0x18, 0xce, 0x46, 0xc2, 0x95, 0xc1, 0xf9, 0x89, 0xf8, 0xd1, 0xdf, 0xfd, 0x0b, 0x00,
0x00, 0xff, 0xff, 0x3e, 0xdd, 0x76, 0xfa, 0xe1, 0x05, 0x00, 0x00,
}
main.rs
#![allow(clippy::needless_return)]
#![feature(portable_simd)]
use core_simd::Simd;
use core::convert::TryInto;
use srng::SRng;
use simd_aes::SimdAes;
const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([
178, 201, 95, 240, 40, 41, 143, 216,
2, 209, 178, 114, 232, 4, 176, 188,
]);
#[allow(non_snake_case)]
fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> {
let zero = Simd::splat(0);
let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes();
hash ^= DEFAULT_SEED;
let mut chunks = data.chunks_exact(16);
for chunk in chunks.by_ref() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
}
let remainder = chunks.remainder();
let mut temp = [0_u8; 16];
temp[..remainder.len()].copy_from_slice(remainder);
let value = Simd::from_array(temp);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
return hash;
}
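// Added note: a minimal sanity-check sketch (not part of the original attack code)
// for the padding property demonstrated by padding_attack() below. On a
// little-endian target the message length is xored into the first byte of the
// initial state and the tail chunk is zero-padded, so e.g. "A" (1 ^ 'A' = 0x40)
// and "B\x00" (2 ^ 'B' = 0x40) produce identical hashes.
#[cfg(test)]
mod padding_property_check {
    use super::*;

    #[test]
    fn padded_messages_collide() {
        assert_eq!(ComputeGlyphHash(b"A"), ComputeGlyphHash(b"B\x00"));
        assert_eq!(
            ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"),
            ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"),
        );
    }
}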
#[allow(dead_code)]
fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> {
data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
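// Added note: a hedged sanity check of the identity the attacks rely on:
// inv_aes_decx4 undoes four aes_dec rounds with an all-zero round key. This
// assumes the SimdAes trait implements the usual AESENC/AESDEC/AESENCLAST/AESDECLAST
// round semantics; ShiftRows and SubBytes commute, which is what makes the
// dec_last / enc x4 / enc_last chain above a valid inverse.
#[cfg(test)]
mod inversion_check {
    use super::*;

    #[test]
    fn inv_aes_decx4_inverts_four_dec_rounds() {
        let zero = Simd::splat(0);
        let x = Simd::from_array(*b"0123456789abcdef");
        let y = x.aes_dec(zero).aes_dec(zero).aes_dec(zero).aes_dec(zero);
        assert_eq!(inv_aes_decx4(y), x);
    }
}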
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
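// (Added clarification) The forged message is `prefix ++ original`, so the real
// hash initializes its state from length count + 16 (accounting for the 16-byte
// prefix block), while the state we want to land on after absorbing that block
// is the target passed in (e.g. the plain initialization for the original count
// in the prefix collision, or an inverted target state in the preimage attack).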
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(message));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext mus be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in of '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in of 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
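// Added note: a small illustrative check. It uses the first 16 bytes of one of
// the colliding prefixes quoted in MESSAGE below ("xvD7FsaUdGy9UyjalZlFEU"),
// which are all alphanumeric, and a block containing spaces, which is rejected.
#[cfg(test)]
mod alphanum_check {
    use super::*;

    #[test]
    fn accepts_alphanumeric_rejects_other_bytes() {
        assert!(check_alphanum(Simd::from_array(*b"xvD7FsaUdGy9Uyja")));
        assert!(!check_alphanum(Simd::from_array(*b"hello world 1234")));
    }
}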
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
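// (Added clarification) find_ascii_zeros brute-forces the 6 attacker-chosen bytes
// until the 16-byte prefix returned by single_prefix happens to be entirely
// alphanumeric, yielding a printable 22-character prefix whose line hashes to zero.
// Rough estimate (an assumption, not from the original text): each trial succeeds
// with probability about (62/256)^16, on the order of 10^-10, hence the
// multi-threaded search and the MH/s progress output.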
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
let controlled_bytes = total_len.min(16);
let controlled = &mut controlled[..controlled_bytes];
controlled[ATTACK_BYTES..].copy_from_slice(bsuffix);
let seed = Simd::from_array([
17820195240, 4041143216,
22093178114, 2324176188,
]);
let mut rng = SRng::new(seed * Simd::splat(worker + 1));
let start = std::time::Instant::now();
for ii in 0_u64.. {
if FOUND.load(Ordering::Relaxed) {
return;
}
let prefix = rng.random_alphanum();
controlled[..6].copy_from_slice(&prefix[..6]);
let prefix = {
let prefix_hash = if whole_block {
invert_block(target_hash, controlled)
} else {
preimage_prefix_hash(target_hash, controlled)
};
single_prefix(total_len, prefix_hash)
};
if check_alphanum(prefix) {
FOUND.store(true, Ordering::Relaxed);
let mut buffer = prefix.to_array().to_vec();
buffer.extend_from_slice(&controlled[..6]);
buffer.extend_from_slice(suffix);
let elapsed = start.elapsed();
let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64();
eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs);
eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer));
println!("{}", core::str::from_utf8(&buffer).unwrap());
break;
}
}
}
const MESSAGE: &[&[u8]] = &[
b" Hello Casey! I hope this message finds you well.",
b" Please ignore those 22 random chars to the left for now.",
b" The work you've done on refterm is admirable. There are",
b" not enough performance conscious programmers around, and",
b" we need a demonstration of what is achievable. However,",
b" I would like to address the claim that the hash function",
b" used in refterm is 'cryptographically secure'. There is",
b" a very specific meaning attached to those words, namely:",
b" 1) it is hard to create a message for a given hash value",
b" 2) it is hard to produce two messages with the same hash",
b" If you check, the following strings have the same hash:",
b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,",
b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.",
b" In fact, every line in the message yields the exact same",
b" hash value. That is 0x00000000000000000000000000000000.",
b" I believe this was a clear enough demonstration that the",
b" hash function `ComputeGlyphHash` isn't cryptographically",
b" secure, and that an attacker can corrupt the glyph cache",
b" by printing glyphs with the same hash. The main problem",
b" with this hash function is that all operations consuming",
b" bytes are invertible. Which means an attacker could run",
b" the hash function in reverse, consuming the message from",
b" behind, and calculate the message to get the given hash.",
b" The hash is also weak to a padding attack. For example,",
br#" two strings "A" and "B\x00" yield the same hash, because"#,
b" the padding is constant, so zero byte in the end doens't",
b" matter, and the first byte is `xor`ed with input length.",
b" If you'd like to, you can read this blog post explaining",
b" these attacks in detail and how to avoid them using well",
b" known methods: https://m1el.github.io/refterm-hash",
b" Best regards, -- Igor",
];
fn main() {
padding_attack();
invert_attack(b"Qwerty123");
prefix_collision_attack(b"hello");
chosen_prefix(b"hello");
preimage_attack(b"hello");
const THREADS: u64 = 16;
for msg in MESSAGE {
FOUND.store(false, Ordering::Relaxed);
let threads = (0..THREADS)
.map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker)))
.collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
};
}
| {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
} | identifier_body |
main.rs | #![allow(clippy::needless_return)]
#![feature(portable_simd)]
use core_simd::Simd;
use core::convert::TryInto;
use srng::SRng;
use simd_aes::SimdAes;
const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([
178, 201, 95, 240, 40, 41, 143, 216,
2, 209, 178, 114, 232, 4, 176, 188,
]);
#[allow(non_snake_case)]
fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> {
let zero = Simd::splat(0);
let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes();
hash ^= DEFAULT_SEED;
let mut chunks = data.chunks_exact(16);
for chunk in chunks.by_ref() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
}
let remainder = chunks.remainder();
let mut temp = [0_u8; 16];
temp[..remainder.len()].copy_from_slice(remainder);
let value = Simd::from_array(temp);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
return hash;
}
#[allow(dead_code)]
fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> {
data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn | (prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext mus be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
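// True only if all 16 lanes are ASCII alphanumeric. Each check relies on
// wrapping subtraction: shifting the range start to zero turns a two-sided
// range test into a single unsigned lanes_gt / lanes_lt comparison.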
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
let controlled_bytes = total_len.min(16);
let controlled = &mut controlled[..controlled_bytes];
controlled[ATTACK_BYTES..].copy_from_slice(bsuffix);
let seed = Simd::from_array([
17820195240, 4041143216,
22093178114, 2324176188,
]);
let mut rng = SRng::new(seed * Simd::splat(worker + 1));
let start = std::time::Instant::now();
for ii in 0_u64.. {
if FOUND.load(Ordering::Relaxed) {
return;
}
let prefix = rng.random_alphanum();
controlled[..6].copy_from_slice(&prefix[..6]);
let prefix = {
let prefix_hash = if whole_block {
invert_block(target_hash, controlled)
} else {
preimage_prefix_hash(target_hash, controlled)
};
single_prefix(total_len, prefix_hash)
};
if check_alphanum(prefix) {
FOUND.store(true, Ordering::Relaxed);
let mut buffer = prefix.to_array().to_vec();
buffer.extend_from_slice(&controlled[..6]);
buffer.extend_from_slice(suffix);
let elapsed = start.elapsed();
let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64();
eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs);
eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer));
println!("{}", core::str::from_utf8(&buffer).unwrap());
break;
}
}
}
const MESSAGE: &[&[u8]] = &[
b" Hello Casey! I hope this message finds you well.",
b" Please ignore those 22 random chars to the left for now.",
b" The work you've done on refterm is admirable. There are",
b" not enough performance conscious programmers around, and",
b" we need a demonstration of what is achievable. However,",
b" I would like to address the claim that the hash function",
b" used in refterm is 'cryptographically secure'. There is",
b" a very specific meaning attached to those words, namely:",
b" 1) it is hard to create a message for a given hash value",
b" 2) it is hard to produce two messages with the same hash",
b" If you check, the following strings have the same hash:",
b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,",
b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.",
b" In fact, every line in the message yields the exact same",
b" hash value. That is 0x00000000000000000000000000000000.",
b" I believe this was a clear enough demonstration that the",
b" hash function `ComputeGlyphHash` isn't cryptographically",
b" secure, and that an attacker can corrupt the glyph cache",
b" by printing glyphs with the same hash. The main problem",
b" with this hash function is that all operations consuming",
b" bytes are invertible. Which means an attacker could run",
b" the hash function in reverse, consuming the message from",
b" behind, and calculate the message to get the given hash.",
b" The hash is also weak to a padding attack. For example,",
br#" two strings "A" and "B\x00" yield the same hash, because"#,
b" the padding is constant, so zero byte in the end doens't",
b" matter, and the first byte is `xor`ed with input length.",
b" If you'd like to, you can read this blog post explaining",
b" these attacks in detail and how to avoid them using well",
b" known methods: https://m1el.github.io/refterm-hash",
b" Best regards, -- Igor",
];
fn main() {
padding_attack();
invert_attack(b"Qwerty123");
prefix_collision_attack(b"hello");
chosen_prefix(b"hello");
preimage_attack(b"hello");
const THREADS: u64 = 16;
for msg in MESSAGE {
FOUND.store(false, Ordering::Relaxed);
let threads = (0..THREADS)
.map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker)))
.collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
};
}
| concat | identifier_name |
main.rs | #![allow(clippy::needless_return)]
#![feature(portable_simd)]
use core_simd::Simd;
use core::convert::TryInto;
use srng::SRng;
use simd_aes::SimdAes;
const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([
178, 201, 95, 240, 40, 41, 143, 216,
2, 209, 178, 114, 232, 4, 176, 188,
]);
#[allow(non_snake_case)]
fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> {
let zero = Simd::splat(0);
let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes();
hash ^= DEFAULT_SEED;
let mut chunks = data.chunks_exact(16);
for chunk in chunks.by_ref() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
}
let remainder = chunks.remainder();
let mut temp = [0_u8; 16];
temp[..remainder.len()].copy_from_slice(remainder);
let value = Simd::from_array(temp);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
return hash;
}
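// Every step above is invertible: the state starts as the message length
// XORed with DEFAULT_SEED, each 16-byte block is XORed in, and four keyless
// aes_dec rounds are applied. The helpers below undo those steps one by one.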
#[allow(dead_code)]
fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> {
data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
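// Inverse of the four chained aes_dec rounds (zero round key) that
// ComputeGlyphHash applies per block: bracketing four aes_enc rounds with
// aes_dec_last and aes_enc_last cancels dec^4 exactly, the same trick
// inv_aes_dec above uses for a single round.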
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^-4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 {
println!("the plaintext mus be shorter than 16 bytes, cannot invert");
return;
}
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
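// Brute-force search for a fully printable forgery: pick 6 random
// alphanumeric bytes in front of `suffix`, analytically derive the 16-byte
// prefix block that drives the final hash to zero, and accept the candidate
// only when that derived prefix is itself alphanumeric. Each worker seeds
// its RNG differently; the first hit flips FOUND so the other threads stop.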
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
let controlled_bytes = total_len.min(16);
let controlled = &mut controlled[..controlled_bytes];
controlled[ATTACK_BYTES..].copy_from_slice(bsuffix);
let seed = Simd::from_array([
17820195240, 4041143216,
22093178114, 2324176188,
]);
let mut rng = SRng::new(seed * Simd::splat(worker + 1));
let start = std::time::Instant::now();
for ii in 0_u64.. {
if FOUND.load(Ordering::Relaxed) {
return;
}
let prefix = rng.random_alphanum();
controlled[..6].copy_from_slice(&prefix[..6]);
let prefix = {
let prefix_hash = if whole_block {
invert_block(target_hash, controlled)
} else {
preimage_prefix_hash(target_hash, controlled)
};
single_prefix(total_len, prefix_hash)
};
if check_alphanum(prefix) {
FOUND.store(true, Ordering::Relaxed);
let mut buffer = prefix.to_array().to_vec();
buffer.extend_from_slice(&controlled[..6]);
buffer.extend_from_slice(suffix);
let elapsed = start.elapsed();
let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64();
eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs);
eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer));
println!("{}", core::str::from_utf8(&buffer).unwrap());
break;
}
}
}
const MESSAGE: &[&[u8]] = &[
b" Hello Casey! I hope this message finds you well.",
b" Please ignore those 22 random chars to the left for now.",
b" The work you've done on refterm is admirable. There are",
b" not enough performance conscious programmers around, and",
b" we need a demonstration of what is achievable. However,",
b" I would like to address the claim that the hash function",
b" used in refterm is 'cryptographically secure'. There is",
b" a very specific meaning attached to those words, namely:",
b" 1) it is hard to create a message for a given hash value",
b" 2) it is hard to produce two messages with the same hash",
b" If you check, the following strings have the same hash:",
b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,",
b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.",
b" In fact, every line in the message yields the exact same",
b" hash value. That is 0x00000000000000000000000000000000.",
b" I believe this was a clear enough demonstration that the",
b" hash function `ComputeGlyphHash` isn't cryptographically",
b" secure, and that an attacker can corrupt the glyph cache",
b" by printing glyphs with the same hash. The main problem",
b" with this hash function is that all operations consuming",
b" bytes are invertible. Which means an attacker could run",
b" the hash function in reverse, consuming the message from",
b" behind, and calculate the message to get the given hash.",
b" The hash is also weak to a padding attack. For example,", | b" known methods: https://m1el.github.io/refterm-hash",
b" Best regards, -- Igor",
];
fn main() {
padding_attack();
invert_attack(b"Qwerty123");
prefix_collision_attack(b"hello");
chosen_prefix(b"hello");
preimage_attack(b"hello");
const THREADS: u64 = 16;
for msg in MESSAGE {
FOUND.store(false, Ordering::Relaxed);
let threads = (0..THREADS)
.map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker)))
.collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
};
} | br#" two strings "A" and "B\x00" yield the same hash, because"#,
b" the padding is constant, so zero byte in the end doens't",
b" matter, and the first byte is `xor`ed with input length.",
b" If you'd like to, you can read this blog post explaining",
b" these attacks in detail and how to avoid them using well", | random_line_split |
main.rs | #![allow(clippy::needless_return)]
#![feature(portable_simd)]
use core_simd::Simd;
use core::convert::TryInto;
use srng::SRng;
use simd_aes::SimdAes;
const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([
178, 201, 95, 240, 40, 41, 143, 216,
2, 209, 178, 114, 232, 4, 176, 188,
]);
#[allow(non_snake_case)]
fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> {
let zero = Simd::splat(0);
let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes();
hash ^= DEFAULT_SEED;
let mut chunks = data.chunks_exact(16);
for chunk in chunks.by_ref() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
}
let remainder = chunks.remainder();
let mut temp = [0_u8; 16];
temp[..remainder.len()].copy_from_slice(remainder);
let value = Simd::from_array(temp);
hash ^= value;
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
hash = hash.aes_dec(zero);
return hash;
}
#[allow(dead_code)]
fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> {
data ^= key;
let zero = Simd::splat(0);
data = data.aes_dec_last(zero).aes_enc(zero);
return data.aes_enc_last(zero);
}
fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let zero = Simd::splat(0);
hash = hash.aes_dec_last(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc(zero);
hash = hash.aes_enc_last(zero);
return hash;
}
fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> {
// The first stage looks like this:
// Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk)
// To get the chunk, we need to reverse these:
// dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk
// Chunk = dec^-4(Hash ^ Seed) ^ Count ^ Seed
// To create a one-prefix initialization, we want:
// Hash = Count
// Count = Count + 16
let mut hash = target_hash;
hash = inv_aes_decx4(hash);
let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes();
hash ^= prefix_init;
hash ^= DEFAULT_SEED;
return hash;
}
fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> {
let chunks = data.len() / 16;
let tail = &data[chunks*16..];
let mut tail_buf = [0_u8; 16];
tail_buf[..tail.len()].copy_from_slice(tail);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
for chunk in data.chunks_exact(16).rev() {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
hash ^= value;
}
return hash;
}
fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> {
let chunk: &[u8; 16] = chunk.try_into().unwrap();
let value = Simd::from_array(*chunk);
hash = inv_aes_decx4(hash);
return hash ^ value;
}
fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> {
let mut tail_buf = [0_u8; 16];
tail_buf[..suffix.len()].copy_from_slice(suffix);
let value = Simd::from_array(tail_buf);
hash = inv_aes_decx4(hash);
hash ^= value;
hash = inv_aes_decx4(hash);
return hash;
}
fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> {
let mut image = prefix.to_array().to_vec();
image.extend_from_slice(target);
image
}
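// Builds a 16-byte prefix such that hash(prefix || message) == hash(message):
// absorbing the prefix block maps the longer message's initial state back to
// the initial state of `message` itself, so every later block evolves
// identically.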
fn prefix_collision_attack(message: &[u8]) {
let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes();
target_hash ^= DEFAULT_SEED;
let prefix = single_prefix(message.len(), target_hash);
println!("Demonstrating prefix attack");
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(b"hello"));
println!("prefix: {:x?}", prefix);
let forgery = concat(prefix, message);
println!("forgery: {:x?}", forgery);
println!("hash: {:x?}", ComputeGlyphHash(&forgery));
println!();
}
fn chosen_prefix(prefix: &[u8]) {
let zero = Simd::splat(0);
let mut message = prefix.to_vec();
let remainder = 16 - (message.len() % 16);
message.extend((0..remainder).map(|_| b'A'));
message.extend((0..16).map(|_| 0));
let hash = ComputeGlyphHash(&message);
let pre_current = invert_last(&[], hash);
let pre_target = invert_last(&[], zero);
let last = message.len() - 16;
let suffix = pre_current ^ pre_target;
message[last..].copy_from_slice(&suffix.to_array());
println!("Demonstrating chosen prefix attack");
println!("prefix: {:x?}", prefix);
println!("forgery: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
println!();
}
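// Preimage attack for a chosen target hash (all zeros here): run the hash
// backwards through `suffix` to get the state required just before it, then
// use single_prefix to construct a first block that produces that state.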
fn preimage_attack(suffix: &[u8]) {
println!("Demonstrating preimage attack");
println!("suffix: {:x?}", suffix);
let target_hash = Simd::splat(0);
println!("goal hash: {:x?}", target_hash);
let prefix_hash = preimage_prefix_hash(target_hash, suffix);
let preimage_prefix = single_prefix(suffix.len(), prefix_hash);
println!("prefix: {:x?}", preimage_prefix);
let message = concat(preimage_prefix, suffix);
println!("message: {:x?}", message);
println!("hash: {:x?}", ComputeGlyphHash(&message));
}
fn padding_attack() {
println!("Demonstrating padding attack");
println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b""));
println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01"));
println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A"));
println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00"));
println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA"));
println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00"));
println!();
}
fn invert_attack(message: &[u8]) {
println!("Demonstrating invert attack, invert a hash up to 15 bytes");
println!("Note: due to padding attack, there are actually more messages");
println!("plaintext: {:x?}", message);
let mut hash = ComputeGlyphHash(message);
println!("hash: {:x?}", hash);
hash = inv_aes_decx4(hash);
hash ^= DEFAULT_SEED;
let mut buffer = hash.to_array();
let len = buffer.iter().rposition(|&chr| chr != 0).map_or(0, |x| x + 1);
if len == 16 |
buffer[0] ^= len as u8;
let recovered = &buffer[..len];
println!("recovered: {:x?}", recovered);
println!("hash: {:x?}", ComputeGlyphHash(recovered));
println!();
}
pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool {
// check if the characters are outside of '0'..'z' range
if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() {
return false;
}
// check if the characters are in the '9'+1..'A'-1 range
if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() {
return false;
}
// check if the characters are in the 'Z'+1..'a'-1 range
if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() {
return false;
}
return true;
}
use core::sync::atomic::{AtomicBool, Ordering};
static FOUND: AtomicBool = AtomicBool::new(false);
fn find_ascii_zeros(suffix: &[u8], worker: u64) {
const ATTACK_BYTES: usize = 6;
let mut target_hash = Simd::<u8, 16>::splat(0);
let mut bsuffix = suffix;
let suffix_len = 16 - ATTACK_BYTES;
let mut whole_block = false;
if suffix.len() >= suffix_len {
target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]);
bsuffix = &suffix[..suffix_len];
whole_block = true;
}
let mut controlled = [0u8; 16];
let total_len = ATTACK_BYTES + suffix.len();
let controlled_bytes = total_len.min(16);
let controlled = &mut controlled[..controlled_bytes];
controlled[ATTACK_BYTES..].copy_from_slice(bsuffix);
let seed = Simd::from_array([
17820195240, 4041143216,
22093178114, 2324176188,
]);
let mut rng = SRng::new(seed * Simd::splat(worker + 1));
let start = std::time::Instant::now();
for ii in 0_u64.. {
if FOUND.load(Ordering::Relaxed) {
return;
}
let prefix = rng.random_alphanum();
controlled[..6].copy_from_slice(&prefix[..6]);
let prefix = {
let prefix_hash = if whole_block {
invert_block(target_hash, controlled)
} else {
preimage_prefix_hash(target_hash, controlled)
};
single_prefix(total_len, prefix_hash)
};
if check_alphanum(prefix) {
FOUND.store(true, Ordering::Relaxed);
let mut buffer = prefix.to_array().to_vec();
buffer.extend_from_slice(&controlled[..6]);
buffer.extend_from_slice(suffix);
let elapsed = start.elapsed();
let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64();
eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs);
eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer));
println!("{}", core::str::from_utf8(&buffer).unwrap());
break;
}
}
}
const MESSAGE: &[&[u8]] = &[
b" Hello Casey! I hope this message finds you well.",
b" Please ignore those 22 random chars to the left for now.",
b" The work you've done on refterm is admirable. There are",
b" not enough performance conscious programmers around, and",
b" we need a demonstration of what is achievable. However,",
b" I would like to address the claim that the hash function",
b" used in refterm is 'cryptographically secure'. There is",
b" a very specific meaning attached to those words, namely:",
b" 1) it is hard to create a message for a given hash value",
b" 2) it is hard to produce two messages with the same hash",
b" If you check, the following strings have the same hash:",
b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,",
b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.",
b" In fact, every line in the message yields the exact same",
b" hash value. That is 0x00000000000000000000000000000000.",
b" I believe this was a clear enough demonstration that the",
b" hash function `ComputeGlyphHash` isn't cryptographically",
b" secure, and that an attacker can corrupt the glyph cache",
b" by printing glyphs with the same hash. The main problem",
b" with this hash function is that all operations consuming",
b" bytes are invertible. Which means an attacker could run",
b" the hash function in reverse, consuming the message from",
b" behind, and calculate the message to get the given hash.",
b" The hash is also weak to a padding attack. For example,",
br#" two strings "A" and "B\x00" yield the same hash, because"#,
b" the padding is constant, so zero byte in the end doens't",
b" matter, and the first byte is `xor`ed with input length.",
b" If you'd like to, you can read this blog post explaining",
b" these attacks in detail and how to avoid them using well",
b" known methods: https://m1el.github.io/refterm-hash",
b" Best regards, -- Igor",
];
fn main() {
padding_attack();
invert_attack(b"Qwerty123");
prefix_collision_attack(b"hello");
chosen_prefix(b"hello");
preimage_attack(b"hello");
const THREADS: u64 = 16;
for msg in MESSAGE {
FOUND.store(false, Ordering::Relaxed);
let threads = (0..THREADS)
.map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker)))
.collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
};
}
| {
println!("the plaintext mus be shorter than 16 bytes, cannot invert");
return;
} | conditional_block |
constants.go | // Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE-CODE in the project root for license information.
package main
const (
AvereAdminUsername = "admin"
MaxZonalNodesCount = 3
MinNodesCount = 3
MaxNodesCount = 16
MinVserverIpCount = MinNodesCount
MaxVserverIpCount = 2 * MaxNodesCount
MinFixedQuotaPercent = 0
MaxFixedQuotaPercent = 100
DefaultSshPort = 22
VfxtLogDateFormat = "2006-01-02.15.04.05"
VServerRangeSeperator = "-"
AverecmdRetryCount = 60 // wait 10 minutes (ex. remove core filer gets perm denied for a while)
AverecmdRetrySleepSeconds = 10
ShellcmdRetryCount = 60 // wait 10 minutes (ex. apt install waiting for lock to release)
ShellcmdRetrySleepSeconds = 10
ClusterAliveRetryCount = 3 // try 3 times to see if the cluster is alive
ClusterAliveRetrySleepSeconds = 5
AverecmdLogFile = "~/averecmd.log"
VServerName = "vserver"
DBUtilYes = "yes"
DBUtilNo = "no"
VfxtKeyPubFile = "~/vfxt_ssh_key_data.pub"
ShellLogFile = "~/shell.log"
// Platform
PlatformAzure = "azure"
// cluster sizes
ClusterSkuUnsupportedTestFast = "unsupported_test_SKU_fast"
ClusterSkuUnsupportedTest = "unsupported_test_SKU"
ClusterSkuProd = "prod_sku"
// cache policies
CachePolicyClientsBypass = "Clients Bypassing the Cluster"
CachePolicyReadCaching = "Read Caching"
CachePolicyReadWriteCaching = "Read and Write Caching"
CachePolicyFullCaching = "Full Caching"
CachePolicyTransitioningClients = "Transitioning Clients Before or After a Migration"
CachePolicyIsolatedCloudWorkstation = "Isolated Cloud Workstation"
CachePolicyCollaboratingCloudWorkstation = "Collaborating Cloud Workstation"
CachePolicyReadOnlyHighVerificationTime = "Read Only High Verification Time"
CachePolicyIsolatedCloudWorkstationCheckAttributes = "{}"
CachePolicyCollaboratingCloudWorkstationCheckAttributes = "{'checkAttrPeriod':30,'checkDirAttrPeriod':30}"
CachePolicyReadOnlyHighVerificationTimeCheckAttributes = "{'checkAttrPeriod':10800,'checkDirAttrPeriod':10800}"
CachePolicyClientsBypassCustomCheckAttributes = "{'checkAttrPeriod':%d}"
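// The %d placeholder is a checkAttrPeriod in seconds, filled in at runtime
// (presumably via fmt.Sprintf) when building a client-bypass policy with a
// custom verification interval.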
QuotaCacheMoveMax = "cfs.quotaCacheMoveMax DN 50" // 50 is the max
QuotaDivisorFloor = "cfs.quotaCacheDivisorFloor CQ %d"
// This setting speeds up how quickly blocks are re-assigned to a policy.
// Decreasing it could reduce the impact from an early-added core filer.
// The default is 20.
QuotaMaxMultiplierForInvalidatedMassQuota = "cfs.maxMultiplierForInvalidatedMassQuota VS 2"
QuotaWaitMinutes = 20 // wait up to 20 minutes for the quota to balance
TargetPercentageError = float32(0.01)
QuotaSpeedUpDeleteFirstFiler = true
TerraformAutoMessage = "Customer Added Custom Setting via Terraform"
TerraformOverriddenAutoMessage = "Customer Overridden Deprecated Custom Setting via Terraform"
TerraformFeatureMessage = "Terraform Feature"
// features that are custom settings
AutoWanOptimizeCustomSetting = "autoWanOptimize YF 2"
CustomSettingOverride = "override "
NFSConnMultCustomSetting = "nfsConnMult YW %d"
MinNFSConnMult = 1
MaxNFSConnMult = 23
DefaultNFSConnMult = 4
AnalyticsClusterFilersRaw = "cluster_filers_raw"
CacheModeReadWrite = "read-write"
CacheModeReadOnly = "read"
WriteBackDelayDefault = 30
// user policies for admin.addUser Avere xml rpc call
UserReadOnly = "ro"
UserReadWrite = "rw"
AdminUserName = "admin"
// filer class
FilerClassNetappNonClustered = "NetappNonClustered"
FilerClassNetappClustered = "NetappClustered"
FilerClassEMCIsilon = "EmcIsilon"
FilerClassOther = "Other"
FilerClassAvereCloud = "AvereCloud"
// VServer retry
VServerRetryCount = 60
VServerRetrySleepSeconds = 10
// filer retry
FilerRetryCount = 120
FilerRetrySleepSeconds = 10
// cluster stable, wait 40 minutes for cluster to become healthy
ClusterStableRetryCount = 240
ClusterStableRetrySleepSeconds = 10
// node change, wait 40 minutes for node increase or decrease
NodeChangeRetryCount = 240
NodeChangeRetrySleepSeconds = 10
// only wait 15 minutes for support uploads
UploadGSIRetryCount = 45
UploadGSIRetrySleepSeconds = 20
// status's returned from Activity
StatusComplete = "complete"
StatusCompleted = "completed"
StatusNodeRemoved = "node(s) removed"
CompletedPercent = "100"
NodeUp = "up"
AlertSeverityGreen = "green" // this means the alert is complete
AlertSeverityYellow = "yellow" // this will eventually resolve itself
// the cloud filer export
CloudFilerExport = "/"
// the share permissions
PermissionsPreserve = "preserve" // this is the default for NFS shares
PermissionsModebits = "modebits" // this is the default for the Azure Storage Share
PrimaryClusterIPKey = "IP"
DefaultExportPolicyName = "default"
DefaultDirectoryServiceName = "default"
FaultString = "faultString"
FaultCode = "faultCode"
MultiCall = "--json system.multicall"
JunctionPolicyPosix = "posix"
JunctionPolicyCifs = "cifs"
CIFSUsernameSourceAD = "AD"
CIFSUsernameSourceFile = "File"
CIFSSelfPasswdUriStrFmt = "https://%s/avere/avere-user.txt"
CIFSSelfGroupUriStrFmt = "https://%s/avere/avere-group.txt"
ProactiveSupportDisabled = "Disabled"
ProactiveSupportSupport = "Support"
ProactiveSupportAPI = "API"
ProactiveSupportFull = "Full"
SupportNamePrefix = "av"
SupportNameSeparator = "0x2d"
SupportNameUnknown = "unknown"
RollingTraceTimeAfter = 2
RollingTraceTimeBefore = 10
DefaultRollingTraceFlag = "0xef401"
)
// terraform schema constants - avoids bugs on schema name changes
const (
controller_address = "controller_address"
controller_admin_username = "controller_admin_username"
controller_admin_password = "controller_admin_password"
controller_ssh_port = "controller_ssh_port"
run_local = "run_local"
use_availability_zones = "use_availability_zones"
allow_non_ascii = "allow_non_ascii"
location = "location"
platform = "platform"
azure_resource_group = "azure_resource_group"
azure_network_resource_group = "azure_network_resource_group"
azure_network_name = "azure_network_name"
azure_subnet_name = "azure_subnet_name"
ntp_servers = "ntp_servers"
timezone = "timezone"
dns_server = "dns_server"
dns_domain = "dns_domain"
dns_search = "dns_search"
proxy_uri = "proxy_uri"
cluster_proxy_uri = "cluster_proxy_uri"
image_id = "image_id"
vfxt_cluster_name = "vfxt_cluster_name"
vfxt_admin_password = "vfxt_admin_password"
vfxt_ssh_key_data = "vfxt_ssh_key_data"
vfxt_node_count = "vfxt_node_count"
node_size = "node_size"
node_cache_size = "node_cache_size"
enable_nlm = "enable_nlm"
vserver_first_ip = "vserver_first_ip"
vserver_ip_count = "vserver_ip_count"
global_custom_settings = "global_custom_settings"
vserver_settings = "vserver_settings"
enable_support_uploads = "enable_support_uploads"
support_uploads_company_name = "support_uploads_company_name"
enable_rolling_trace_data = "enable_rolling_trace_data"
rolling_trace_flag = "rolling_trace_flag" | cifs_netbios_domain_name = "cifs_netbios_domain_name"
cifs_dc_addreses = "cifs_dc_addreses"
cifs_server_name = "cifs_server_name"
cifs_username = "cifs_username"
cifs_password = "cifs_password"
cifs_flatfile_passwd_uri = "cifs_flatfile_passwd_uri"
cifs_flatfile_group_uri = "cifs_flatfile_group_uri"
cifs_flatfile_passwd_b64z = "cifs_flatfile_passwd_b64z"
cifs_flatfile_group_b64z = "cifs_flatfile_group_b64z"
cifs_rid_mapping_base_integer = "cifs_rid_mapping_base_integer"
cifs_organizational_unit = "cifs_organizational_unit"
cifs_trusted_active_directory_domains = "cifs_trusted_active_directory_domains"
login_services_ldap_server = "login_services_ldap_server"
login_services_ldap_basedn = "login_services_ldap_basedn"
login_services_ldap_binddn = "login_services_ldap_binddn"
login_services_ldap_bind_password = "login_services_ldap_bind_password"
enable_extended_groups = "enable_extended_groups"
user_assigned_managed_identity = "user_assigned_managed_identity"
user = "user"
name = "name"
password = "password"
permission = "permission"
core_filer = "core_filer"
core_filer_name = "name"
fqdn_or_primary_ip = "fqdn_or_primary_ip"
filer_class = "filer_class"
cache_policy = "cache_policy"
auto_wan_optimize = "auto_wan_optimize"
nfs_connection_multiplier = "nfs_connection_multiplier"
ordinal = "ordinal"
fixed_quota_percent = "fixed_quota_percent"
custom_settings = "custom_settings"
junction = "junction"
namespace_path = "namespace_path"
cifs_share_name = "cifs_share_name"
core_filer_cifs_share_name = "core_filer_cifs_share_name"
cifs_share_ace = "cifs_share_ace"
cifs_create_mask = "cifs_create_mask"
cifs_dir_mask = "cifs_dir_mask"
core_filer_export = "core_filer_export"
export_subdirectory = "export_subdirectory"
export_rule = "export_rule"
azure_storage_filer = "azure_storage_filer"
account_name = "account_name"
container_name = "container_name"
vfxt_management_ip = "vfxt_management_ip"
vserver_ip_addresses = "vserver_ip_addresses"
node_names = "node_names"
junction_namespace_path = "junction_namespace_path"
primary_cluster_ips = "primary_cluster_ips"
licensing_id = "licensing_id"
mass_filer_mappings = "mass_filer_mappings"
tags = "tags"
) | active_support_upload = "active_support_upload"
enable_secure_proactive_support = "enable_secure_proactive_support"
cifs_ad_domain = "cifs_ad_domain" | random_line_split |
FRB_MCMC.py | import numpy as np
import sys
from subprocess import *
import os
import emcee
from emcee.utils import MPIPool
#from schwimmbad import MPIPool
import astropy
import xi_2D
import time
import lightcone_FRB_decreasingz_xlos as lc
import DM
import h5py
#import matplotlib.pyplot as pl
import misc_functions as misc
import mpi4py
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
from scipy.stats import skewnorm
import misc_functions as misc
from scipy import stats
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
#let's name this run
RUN= int(np.abs(np.random.uniform(0, 2000)))
os.system("echo RUN IS : " + str(RUN))
os.system("mkdir ../Boxes/Output_files")
boxes_path = '../Boxes/'
def copy_FROM_TO(marker , FROM_OUTPUT_NUMBER, TO_OUTPUT_NUMBER):
#this does all boxes that have the desired tag
filename_list = open(boxes_path +'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER), 'w')
call(["ls ../Boxes/*" + str(FROM_OUTPUT_NUMBER)], stdout = filename_list, shell = True)
filename_list.close()
with open(boxes_path + 'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER) , 'r') as inF:
for line in inF:
if str(marker) in line:
os.system("cp " + line.replace("\n", '') + " " + line.replace(str(FROM_OUTPUT_NUMBER),str(TO_OUTPUT_NUMBER)))
def load_data(path, HII_DIM):
data = np.fromfile(path ,dtype=np.float32)
return data.reshape((HII_DIM,HII_DIM,HII_DIM))
def beta2sigma(beta):
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
return (sign*sigma)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
return (sign*sigma)
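# Maps the sampled beta in [-1, 1] onto a signed width parameter with
# |sigma| in [0, pi], the form expected by the modified ./init call.
# Note: lnlike below inlines this same mapping, so this helper is unused here.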
####################################################################
# Cosmology and Astrophysical Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
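# The density lightcone depends only on the fixed matter field, so it is
# built once here and reused by every likelihood evaluation; the ionization
# (xH) lightcone depends on the sampled EoR parameters and is rebuilt inside
# lnlike for each proposal.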
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#lets add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def lnprior(x):
|
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR xH boxes for this run
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
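# DM_z_y_z_model[i, y, z] accumulates the dispersion measure along the
# sightline through sky pixel (y, z) over the remaining lightcone, i.e. from
# the i-th sampled redshift toward z = 0; only every
# len(lightcone_redshifts)/lc_z_subsample-th redshift is evaluated to keep
# the cost manageable.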
DM_z_y_z_model = np.zeros((lc_z_subsample+1, HII_DIM, HII_DIM ))
n_FRBs_z = np.zeros((lc_z_subsample+1))
z_we_are_actually_using = np.zeros((lc_z_subsample+1))
#start with z = 9.6 for now (redshift = 0 index)
for red_idx in range(lc_z_subsample+1):
red = red_idx*int(len(lightcone_redshifts)/lc_z_subsample)
z_we_are_actually_using[red_idx] = lightcone_redshifts[red]
os.system("echo Doing z" + str( lightcone_redshifts[red]))
for y in range(HII_DIM):
for z in range(HII_DIM):
#if Halo_lightcone[red][y][z] != 0:
# n_FRBs_z[red_idx] += 1
DM_z_y_z_model[red_idx][y][z] = DM.compute_DM(y,z, xH_lightcone_model[red:,:,], densitylightcone[red:,:,], lightcone_redshifts[red:], Halo_lightcone[red:])
time_DM_end = time.time()
os.system("echo RUN " + str(RUN) + " DM runtime is " + str(time_DM_end-time_DM_start))
#sum DMs over all sightlines
DM_z = (0.01**3)*np.true_divide(np.sum(DM_z_y_z_model, axis = (1,2)), (HII_DIM*HII_DIM*pc))
#compute chi squared for each redshift and then add them up
chi_squared_total = 0
diff = np.zeros_like(DM_z)
diff = np.subtract(DM_z, fiducial_DM_z)
diff = np.true_divide(diff, 1) # unit variance assumed for every redshift bin; replace the 1 with per-bin errors if available
chi_squared_total += np.dot(diff, diff)
os.system("echo RUN " + str(RUN) + " chi squared total is " + str(chi_squared_total) + str("for params") + str(zeta) + " " + str(Mturn) + " " + str(beta) + str(Rmfp))
#add this chi2 to the list
chi2_model.append(chi_squared_total)
#add DM to the list
model_DM_z.append(DM_z)
#cleanup boxes that are leftover
os.system("rm ../Boxes/*" + str(OUTPUT_NUMBER))
#save results to an npz file
np.savez('MCMC_snapshot_FRB' + str(RUN)+ '.npz', betas = np.array(beta_list) , zetas = np.array(zeta_list) , Mturns = np.array(Mturn_list),Rmfps = np.array(Rmfp_list) , model_DM_z = np.array(model_DM_z), chi2_model = np.array(chi2_model) )
return (-chi_squared_total/2.)
####################################################################
# Make Mock Data #
####################################################################
pool = MPIPool()
#with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit()
#parameters used for making fiducial data
#we are using a fiducial inside-out model
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
#fiducial_redshifts = npzfile['fiducial_redshifts']
####################################################################
# Define Starting Point and run the MCMC #
####################################################################
randomize = np.random.normal(1, 0.1, ndim * nwalkers).reshape((nwalkers, ndim))
for i in range(nwalkers):
randomize[i][0] = np.random.uniform(0.9,0.95)
randomize[i][1] = np.random.uniform(459, 550)
randomize[i][2] = np.random.uniform(9, 11)
randomize[i][3] = np.random.uniform(27, 33)
starting_parameters = randomize
#npzfile = np.load('checkpoint_values_FRB_full.npz')
#starting_parameters = npzfile['position']
os.system('echo Our starting parameters have been saved ')
np.savez('starting_params_test_FRB.npz' , starting_parameters = starting_parameters)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool = pool, args = [fiducial_DM_z])
pos , prob , state = sampler.run_mcmc(starting_parameters, 10)
####################################################################
# Save MCMC results and checkpoint the progress #
####################################################################
#save final position in an npz file to be ready afterwards
np.savez('checkpoint_values_FRB_full.npz', position = pos, probability = prob, stateof = state, acceptance_frac= np.mean(sampler.acceptance_fraction))
#write out chain data to npz files
np.savez('flatchain_FRB_' +str(RUN)+ '_full.npz', betas = sampler.flatchain[:,0], zeta = sampler.flatchain[:,1] , Mturn = sampler.flatchain[:,2], Rmfp = sampler.flatchain[:,3] , acceptance_frac= np.mean(sampler.acceptance_fraction))
np.savez('chain_FRB_' + str(RUN) +'_full.npz', samples = sampler.chain)
pool.close()
| beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf | identifier_body |
FRB_MCMC.py | import numpy as np
import sys
from subprocess import *
import os
import emcee
from emcee.utils import MPIPool
#from schwimmbad import MPIPool
import astropy
import xi_2D
import time
import lightcone_FRB_decreasingz_xlos as lc
import DM
import h5py
#import matplotlib.pyplot as pl
import misc_functions as misc
import mpi4py
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
from scipy.stats import skewnorm
import misc_functions as misc
from scipy import stats
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
#let's name this run
RUN= int(np.abs(np.random.uniform(0, 2000)))
os.system("echo RUN IS : " + str(RUN))
os.system("mkdir ../Boxes/Output_files")
boxes_path = '../Boxes/'
def copy_FROM_TO(marker , FROM_OUTPUT_NUMBER, TO_OUTPUT_NUMBER):
#this does all boxes that have the desired tag
filename_list = open(boxes_path +'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER), 'w')
call(["ls ../Boxes/*" + str(FROM_OUTPUT_NUMBER)], stdout = filename_list, shell = True)
filename_list.close()
with open(boxes_path + 'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER) , 'r') as inF:
for line in inF:
if str(marker) in line:
os.system("cp " + line.replace("\n", '') + " " + line.replace(str(FROM_OUTPUT_NUMBER),str(TO_OUTPUT_NUMBER)))
def load_data(path, HII_DIM):
data = np.fromfile(path ,dtype=np.float32)
return data.reshape((HII_DIM,HII_DIM,HII_DIM))
def beta2sigma(beta):
if beta >= 0:
|
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
return (sign*sigma)
####################################################################
# Cosmology and Astrophysical Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent times
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#lets add the number density to the prior. If we constrain it using the likelihood then we may end up in the
#unfortunate situation of having the code get stuck with a gigantic ACF
def lnprior(x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy the post-EoR xH boxes for this run
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model = np.zeros((lc_z_subsample+1, HII_DIM, HII_DIM ))
n_FRBs_z = np.zeros((lc_z_subsample+1))
z_we_are_actually_using = np.zeros((lc_z_subsample+1))
#start with z = 9.6 for now (redshift = 0 index)
for red_idx in range(lc_z_subsample+1):
red = red_idx*int(len(lightcone_redshifts)/lc_z_subsample)
z_we_are_actually_using[red_idx] = lightcone_redshifts[red]
os.system("echo Doing z" + str( lightcone_redshifts[red]))
for y in range(HII_DIM):
for z in range(HII_DIM):
#if Halo_lightcone[red][y][z] != 0:
# n_FRBs_z[red_idx] += 1
DM_z_y_z_model[red_idx][y][z] = DM.compute_DM(y,z, xH_lightcone_model[red:,:,], densitylightcone[red:,:,], lightcone_redshifts[red:], Halo_lightcone[red:])
time_DM_end = time.time()
os.system("echo RUN " + str(RUN) + " DM runtime is " + str(time_DM_end-time_DM_start))
#sum DMs over all sightlines
DM_z = (0.01**3)*np.true_divide(np.sum(DM_z_y_z_model, axis = (1,2)), (HII_DIM*HII_DIM*pc))
#compute chi squared for each redshift and then add them up
chi_squared_total = 0
diff = np.zeros_like(DM_z)
diff = np.subtract(DM_z, fiducial_DM_z)
#divide by the per-redshift uncertainty (currently a placeholder value of 1)
diff = np.true_divide(diff, 1)
chi_squared_total += np.dot(diff, diff)
os.system("echo RUN " + str(RUN) + " chi squared total is " + str(chi_squared_total) + " for params " + str(zeta) + " " + str(Mturn) + " " + str(beta) + " " + str(Rmfp))
#add this chi2 to the list
chi2_model.append(chi_squared_total)
#add DM to the list
model_DM_z.append(DM_z)
#cleanup boxes that are leftover
os.system("rm ../Boxes/*" + str(OUTPUT_NUMBER))
#save results to an npz file
np.savez('MCMC_snapshot_FRB' + str(RUN)+ '.npz', betas = np.array(beta_list) , zetas = np.array(zeta_list) , Mturns = np.array(Mturn_list),Rmfps = np.array(Rmfp_list) , model_DM_z = np.array(model_DM_z), chi2_model = np.array(chi2_model) )
return (-chi_squared_total/2.)
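#------------------------------------------------------------------
#Hedged note (added for illustration, not part of the original script):
#with the unit error bars used above, the Gaussian log-likelihood reduces to
#    ln L = -0.5 * sum_z ((DM_model(z) - DM_fiducial(z)) / sigma_z)**2,  with sigma_z = 1,
#which is exactly what the diff / np.dot(diff, diff) lines compute before
#returning -chi_squared_total/2.
#------------------------------------------------------------------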
####################################################################
# Make Mock Data #
####################################################################
pool = MPIPool()
#with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit()
#parameters used for making fiducial data
#we are using a fiducial inside-out model
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
#fiducial_redshifts = npzfile['fiducial_redshifts']
####################################################################
# Define Starting Point and run the MCMC #
####################################################################
randomize = np.random.normal(1, 0.1, ndim * nwalkers).reshape((nwalkers, ndim))
for i in range(nwalkers):
randomize[i][0] = np.random.uniform(0.9,0.95)
randomize[i][1] = np.random.uniform(459, 550)
randomize[i][2] = np.random.uniform(9, 11)
randomize[i][3] = np.random.uniform(27, 33)
starting_parameters = randomize
#npzfile = np.load('checkpoint_values_FRB_full.npz')
#starting_parameters = npzfile['position']
os.system('echo Our starting parameters have been saved ')
np.savez('starting_params_test_FRB.npz' , starting_parameters = starting_parameters)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool = pool, args = [fiducial_DM_z])
pos , prob , state = sampler.run_mcmc(starting_parameters, 10)
####################################################################
# Save MCMC results and checkpoint the progress #
####################################################################
#save final position in an npz file to be read back in afterwards
np.savez('checkpoint_values_FRB_full.npz', position = pos, probability = prob, stateof = state, acceptance_frac= np.mean(sampler.acceptance_fraction))
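#Hedged illustration (not in the original): a later run could resume from this
#checkpoint by reloading the saved walker positions, e.g.
#    restart = np.load('checkpoint_values_FRB_full.npz', allow_pickle = True)
#    pos , prob , state = sampler.run_mcmc(restart['position'], 10)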
#write out chain data to npz files
np.savez('flatchain_FRB_' +str(RUN)+ '_full.npz', betas = sampler.flatchain[:,0], zeta = sampler.flatchain[:,1] , Mturn = sampler.flatchain[:,2], Rmfp = sampler.flatchain[:,3] , acceptance_frac= np.mean(sampler.acceptance_fraction))
np.savez('chain_FRB_' + str(RUN) +'_full.npz', samples = sampler.chain)
pool.close()
| sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
return (sign*sigma) | conditional_block |
FRB_MCMC.py | import numpy as np
import sys
from subprocess import *
import os
import emcee
from emcee.utils import MPIPool
#from schwimmbad import MPIPool
import astropy
import xi_2D
import time
import lightcone_FRB_decreasingz_xlos as lc
import DM
import h5py
#import matplotlib.pyplot as pl
import misc_functions as misc
import mpi4py
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
from scipy.stats import skewnorm
from scipy import stats
#let's name this run
RUN= int(np.abs(np.random.uniform(0, 2000)))
os.system("echo RUN IS : " + str(RUN))
os.system("mkdir ../Boxes/Output_files")
boxes_path = '../Boxes/'
def copy_FROM_TO(marker , FROM_OUTPUT_NUMBER, TO_OUTPUT_NUMBER):
#this does all boxes that have the desired tag
filename_list = open(boxes_path +'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER), 'w')
call(["ls ../Boxes/*" + str(FROM_OUTPUT_NUMBER)], stdout = filename_list, shell = True)
filename_list.close()
with open(boxes_path + 'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER) , 'r') as inF:
for line in inF:
if str(marker) in line:
os.system("cp " + line.replace("\n", '') + " " + line.replace(str(FROM_OUTPUT_NUMBER),str(TO_OUTPUT_NUMBER)))
def load_data(path, HII_DIM):
data = np.fromfile(path ,dtype=np.float32)
return data.reshape((HII_DIM,HII_DIM,HII_DIM))
def beta2sigma(beta):
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
return (sign*sigma)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
return (sign*sigma)
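#------------------------------------------------------------------
#Hedged sanity check (added for illustration, not in the original script):
#beta2sigma maps beta in [-1, 1] onto the signed width parameter passed to ./init, e.g.
#    beta2sigma(0.0) -> +pi, beta2sigma(0.5) -> +pi/2,
#    beta2sigma(-0.5) -> -pi/2, beta2sigma(1.0) == beta2sigma(-1.0) == 0.0
#Uncomment to verify:
#for _b in (-1.0, -0.5, 0.0, 0.5, 1.0):
#    os.system("echo beta2sigma(" + str(_b) + ") = " + str(beta2sigma(_b)))
#------------------------------------------------------------------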
####################################################################
# Cosmology and Astrophysical Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
#os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent iterations)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrained it through the likelihood instead, we could end up in the
#unfortunate situation of the code getting stuck with a gigantic ACF
def | (x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy over the post-EoR boxes
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model = np.zeros((lc_z_subsample+1, HII_DIM, HII_DIM ))
n_FRBs_z = np.zeros((lc_z_subsample+1))
z_we_are_actually_using = np.zeros((lc_z_subsample+1))
#start with z = 9.6 for now (redshift = 0 index)
for red_idx in range(lc_z_subsample+1):
red = red_idx*int(len(lightcone_redshifts)/lc_z_subsample)
z_we_are_actually_using[red_idx] = lightcone_redshifts[red]
os.system("echo Doing z" + str( lightcone_redshifts[red]))
for y in range(HII_DIM):
for z in range(HII_DIM):
#if Halo_lightcone[red][y][z] != 0:
# n_FRBs_z[red_idx] += 1
DM_z_y_z_model[red_idx][y][z] = DM.compute_DM(y,z, xH_lightcone_model[red:,:,], densitylightcone[red:,:,], lightcone_redshifts[red:], Halo_lightcone[red:])
time_DM_end = time.time()
os.system("echo RUN " + str(RUN) + " DM runtime is " + str(time_DM_end-time_DM_start))
#sum DMs over all sightlines
DM_z = (0.01**3)*np.true_divide(np.sum(DM_z_y_z_model, axis = (1,2)), (HII_DIM*HII_DIM*pc))
#compute chi squared for each redshift and then add them up
chi_squared_total = 0
diff = np.zeros_like(DM_z)
diff = np.subtract(DM_z, fiducial_DM_z)
#divide by the per-redshift uncertainty (currently a placeholder value of 1)
diff = np.true_divide(diff, 1)
chi_squared_total += np.dot(diff, diff)
os.system("echo RUN " + str(RUN) + " chi squared total is " + str(chi_squared_total) + " for params " + str(zeta) + " " + str(Mturn) + " " + str(beta) + " " + str(Rmfp))
#add this chi2 to the list
chi2_model.append(chi_squared_total)
#add DM to the list
model_DM_z.append(DM_z)
#cleanup boxes that are leftover
os.system("rm ../Boxes/*" + str(OUTPUT_NUMBER))
#save results to an npz file
np.savez('MCMC_snapshot_FRB' + str(RUN)+ '.npz', betas = np.array(beta_list) , zetas = np.array(zeta_list) , Mturns = np.array(Mturn_list),Rmfps = np.array(Rmfp_list) , model_DM_z = np.array(model_DM_z), chi2_model = np.array(chi2_model) )
return (-chi_squared_total/2.)
####################################################################
# Make Mock Data #
####################################################################
pool = MPIPool()
#with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit()
#parameters used for making fiducial data
#we are using a fiducial inside-out model
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
#fiducial_redshifts = npzfile['fiducial_redshifts']
####################################################################
# Define Starting Point and run the MCMC #
####################################################################
randomize = np.random.normal(1, 0.1, ndim * nwalkers).reshape((nwalkers, ndim))
for i in range(nwalkers):
randomize[i][0] = np.random.uniform(0.9,0.95)
randomize[i][1] = np.random.uniform(459, 550)
randomize[i][2] = np.random.uniform(9, 11)
randomize[i][3] = np.random.uniform(27, 33)
starting_parameters = randomize
#npzfile = np.load('checkpoint_values_FRB_full.npz')
#starting_parameters = npzfile['position']
os.system('echo Our starting parameters have been saved ')
np.savez('starting_params_test_FRB.npz' , starting_parameters = starting_parameters)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool = pool, args = [fiducial_DM_z])
pos , prob , state = sampler.run_mcmc(starting_parameters, 10)
####################################################################
# Save MCMC results and checkpoint the progress #
####################################################################
#save final position in an npz file to be read back in afterwards
np.savez('checkpoint_values_FRB_full.npz', position = pos, probability = prob, stateof = state, acceptance_frac= np.mean(sampler.acceptance_fraction))
#write out chain data to npz files
np.savez('flatchain_FRB_' +str(RUN)+ '_full.npz', betas = sampler.flatchain[:,0], zeta = sampler.flatchain[:,1] , Mturn = sampler.flatchain[:,2], Rmfp = sampler.flatchain[:,3] , acceptance_frac= np.mean(sampler.acceptance_fraction))
np.savez('chain_FRB_' + str(RUN) +'_full.npz', samples = sampler.chain)
pool.close()
| lnprior | identifier_name |
FRB_MCMC.py | import numpy as np
import sys
from subprocess import *
import os
import emcee
from emcee.utils import MPIPool
#from schwimmbad import MPIPool
import astropy
import xi_2D
import time
import lightcone_FRB_decreasingz_xlos as lc
import DM
import h5py
#import matplotlib.pyplot as pl
import misc_functions as misc
import mpi4py
import lightcone_FRB_decreasingz_xlos_forHaloFinder as lcH
from scipy.stats import skewnorm
from scipy import stats
#let's name this run
RUN= int(np.abs(np.random.uniform(0, 2000)))
os.system("echo RUN IS : " + str(RUN))
os.system("mkdir ../Boxes/Output_files")
boxes_path = '../Boxes/'
def copy_FROM_TO(marker , FROM_OUTPUT_NUMBER, TO_OUTPUT_NUMBER):
#this does all boxes that have the desired tag
filename_list = open(boxes_path +'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER), 'w')
call(["ls ../Boxes/*" + str(FROM_OUTPUT_NUMBER)], stdout = filename_list, shell = True)
filename_list.close()
with open(boxes_path + 'Output_files/filename_list_'+str(FROM_OUTPUT_NUMBER)+'_' +str(TO_OUTPUT_NUMBER) , 'r') as inF:
for line in inF:
if str(marker) in line:
os.system("cp " + line.replace("\n", '') + " " + line.replace(str(FROM_OUTPUT_NUMBER),str(TO_OUTPUT_NUMBER)))
def load_data(path, HII_DIM):
data = np.fromfile(path ,dtype=np.float32)
return data.reshape((HII_DIM,HII_DIM,HII_DIM))
def beta2sigma(beta):
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
return (sign*sigma)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
return (sign*sigma)
####################################################################
# Cosmology and Astrophysical Parameters #
####################################################################
#EoR parameters (fixed in this version)
zeta = 500
Mturn = 10
Rmfp = 30
#Cosmology constants
nHI = 10
H0 = float(68)/float(3.086e19)
OMm = 0.25
OMl = 0.75
baryon2DMfrac = 0.05
#constants
pc = 3.08*10**16 # pc in terms of m
cm2m = 0.01 #cm to m conversion
####################################################################
# Emcee specific parameters #
####################################################################
|
####################################################################
# FRB script loader #
####################################################################
#Constants for the script
Box_length = 300
HII_DIM = 200
DIM = 800
####################################################################
# Lightcone stuff #
####################################################################
halo_directory = '../Boxes/Default_Res/'
lightcone_sharpcutoff = False
z_end = 0.0
z_start = 10.0
#we never actually use this
delta_z = 0.5
box_slice = 199
nboxes = int(np.round(float(z_start - z_end)/float(delta_z),1)) + 1
z_range_of_halo_boxes = np.linspace(z_start, z_end, nboxes)
#confirm that the z_range is correct (debug)
os.system("echo " + str(z_range_of_halo_boxes))
#directory to get the base density boxes AFTER reionization
density_boxes ='../Boxes/Fiducial_1_5000_30_5e8_allZ/'
#make the base density lightcone once (use it for all subsequent iterations)
densitylightcone = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), N = 500, directory = density_boxes, marker = 'updated_smoothed_deltax')
#make the halolightcone
#what is the redshift range spanned by the halo files?
z_range_of_halo_boxes = np.linspace(10, 0.0, int(np.round(float(10)/float(0.5),2) + 1))
os.system('echo ' + str(z_range_of_halo_boxes))
#load halos (i.e. FRBs)
halo_directory = '../Boxes/Halos/'
#load all the halopos for all the redshifts and store them into a single array
#Halopos_z = np.zeros((len(z_range_of_halo_boxes)), dtype = object)
#for z in range(Halopos_z.shape[0]):
# if z_range_of_halo_boxes[z] < 6.0:
# print('switching to the same box')
#switch to the same box over and over (because those boxes aren't made yet)
# box = 'halos_z6.00_800_300Mpc_5015241'
# Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
# else:
# box = 'halos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'0_800_300Mpc_5015241'
#Halopos_z[z] = np.genfromtxt(halo_directory + box, dtype=None)
#os.system('echo Done redshift' + str(np.round(z_range_of_halo_boxes[z],1)))
#save the lightcone should something go very very wrong
#np.savez('Halopos_z'+str(np.round(z_range_of_halo_boxes[z],1))+'_FRB.npz', Halopos_z = Halopos_z[z])
#do the lightcone for the Halo field
#Halo_Position_Box = np.zeros((len(z_range_of_halo_boxes), HII_DIM, HII_DIM, HII_DIM))
#Halo_Mass_Box = np.zeros_like(Halo_Position_Box)
#for z in range(len(z_range_of_halo_boxes)):
# Halo_Position_Box[z] , Halo_Mass_Box[z] = misc.map2box(Halopos_z[z], HII_DIM)
#Halo_lightcone, halolightcone_redshifts = lcH.lightcone(DIM = HII_DIM, halo_boxes_z = Halo_Position_Box, z_range_of_boxes = z_range_of_halo_boxes, box_slice = int(box_slice), return_redshifts = True)
#load Fiducial stuff
npzfile = np.load('Halo_lightcone.npz', allow_pickle = True)
Halo_lightcone = npzfile['Halo_lightcone']
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
os.system("echo shape of fiducial dm z " + str(fiducial_DM_z.shape))
####################################################################
# Define Bayesian Probabilities #
####################################################################
#let's add the number density to the prior. If we constrained it through the likelihood instead, we could end up in the
#unfortunate situation of the code getting stuck with a gigantic ACF
def lnprior(x):
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
if -1 < beta < 1 and 200 < zeta < 1000 and 1e7 < (Mturn*5e7) < 9e9 and 5 < Rmfp < 60:
os.system("echo RUN " + str(RUN) + " accepting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp) )
return 0.0
os.system("echo RUN " + str(RUN) + " Rejecting the fuck out of beta " + str(beta) + " " + str(zeta) + " " + str(Mturn) + " " + str(Rmfp ) )
return -np.inf
def lnprob(x, fiducial_DM_z ):
lp = lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(x, fiducial_DM_z)
beta_list = []
zeta_list = []
Mturn_list = []
model_DM_z = []
Rmfp_list = []
chi2_model = []
def lnlike(x, fiducial_DM_z):
#draw a tag for this run
OUTPUT_NUMBER = int(np.abs(np.random.uniform(1000000, 9990000)))
#map emcee space to EoR parameters
beta = x[0]
zeta = x[1]
Mturn = x[2]
Rmfp = x[3]
beta_list.append(beta)
zeta_list.append(zeta)
Mturn_list.append(Mturn)
Rmfp_list.append(Rmfp)
if beta >= 0:
sign = np.sign(-np.pi*(beta) + np.pi)
sigma = np.abs(-np.pi*(beta) + np.pi)
else:
sign = np.sign(-np.pi*(beta) - np.pi)
sigma = np.abs(-np.pi*(beta) - np.pi)
t21_i = time.time()
#make the reionization scenario for these parameters
os.system("echo choice of beta is " + str(beta) + ' leading to a sigma of' + str(sigma) +' with sign' + str(sign) )
os.system("./init " + str(sign) + ' ' + str(sigma) +' ' + str(OUTPUT_NUMBER) )
os.system("./drive_zscroll_noTs " + str(10*zeta) +' ' + str(Rmfp) +' ' + str(Mturn*5*10**7)+ ' ' + str(OUTPUT_NUMBER))
t21_f = time.time()
os.system("echo RUN " + str(RUN) + " 21cmfast runtime is " + str(t21_f - t21_i))
#make lightcone for this model data
os.system("echo n boxes is " + str(nboxes))
#make the lightcone for each quantity
box_slice = 199
#copy over the post-EoR boxes
copy_FROM_TO('xH_', 5015241, OUTPUT_NUMBER)
xH_lightcone_model , lightcone_redshifts = lc.lightcone(DIM = HII_DIM, z_range_of_boxes = z_range_of_halo_boxes, N = 500, box_slice = int(box_slice), directory = '../Boxes/', tag = OUTPUT_NUMBER, return_redshifts = True )
os.system('echo Done making lightcone!')
time_DM_start = time.time()
#number of redshifts to include in our FRB plot
lc_z_subsample = 10
DM_z_y_z_model = np.zeros((lc_z_subsample+1, HII_DIM, HII_DIM ))
n_FRBs_z = np.zeros((lc_z_subsample+1))
z_we_are_actually_using = np.zeros((lc_z_subsample+1))
#start with z = 9.6 for now (redshift = 0 index)
for red_idx in range(lc_z_subsample+1):
red = red_idx*int(len(lightcone_redshifts)/lc_z_subsample)
z_we_are_actually_using[red_idx] = lightcone_redshifts[red]
os.system("echo Doing z" + str( lightcone_redshifts[red]))
for y in range(HII_DIM):
for z in range(HII_DIM):
#if Halo_lightcone[red][y][z] != 0:
# n_FRBs_z[red_idx] += 1
DM_z_y_z_model[red_idx][y][z] = DM.compute_DM(y,z, xH_lightcone_model[red:,:,], densitylightcone[red:,:,], lightcone_redshifts[red:], Halo_lightcone[red:])
time_DM_end = time.time()
os.system("echo RUN " + str(RUN) + " DM runtime is " + str(time_DM_end-time_DM_start))
#sum DMs over all sightlines
DM_z = (0.01**3)*np.true_divide(np.sum(DM_z_y_z_model, axis = (1,2)), (HII_DIM*HII_DIM*pc))
#compute chi squared for each redshift and then add them up
chi_squared_total = 0
diff = np.zeros_like(DM_z)
diff = np.subtract(DM_z, fiducial_DM_z)
#divide by the per-redshift uncertainty (currently a placeholder value of 1)
diff = np.true_divide(diff, 1)
chi_squared_total += np.dot(diff, diff)
os.system("echo RUN " + str(RUN) + " chi squared total is " + str(chi_squared_total) + " for params " + str(zeta) + " " + str(Mturn) + " " + str(beta) + " " + str(Rmfp))
#add this chi2 to the list
chi2_model.append(chi_squared_total)
#add DM to the list
model_DM_z.append(DM_z)
#cleanup boxes that are leftover
os.system("rm ../Boxes/*" + str(OUTPUT_NUMBER))
#save results to an npz file
np.savez('MCMC_snapshot_FRB' + str(RUN)+ '.npz', betas = np.array(beta_list) , zetas = np.array(zeta_list) , Mturns = np.array(Mturn_list),Rmfps = np.array(Rmfp_list) , model_DM_z = np.array(model_DM_z), chi2_model = np.array(chi2_model) )
return (-chi_squared_total/2.)
####################################################################
# Make Mock Data #
####################################################################
pool = MPIPool()
#with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit()
#parameters used for making fiducial data
#we are using a fiducial inside-out model
npzfile = np.load('FRB_sample_data.npz')
fiducial_DM_z = npzfile['fiducial_DM_z']
#fiducial_redshifts = npzfile['fiducial_redshifts']
####################################################################
# Define Starting Point and run the MCMC #
####################################################################
randomize = np.random.normal(1, 0.1, ndim * nwalkers).reshape((nwalkers, ndim))
for i in range(nwalkers):
randomize[i][0] = np.random.uniform(0.9,0.95)
randomize[i][1] = np.random.uniform(459, 550)
randomize[i][2] = np.random.uniform(9, 11)
randomize[i][3] = np.random.uniform(27, 33)
starting_parameters = randomize
#npzfile = np.load('checkpoint_values_FRB_full.npz')
#starting_parameters = npzfile['position']
os.system('echo Our starting parameters have been saved ')
np.savez('starting_params_test_FRB.npz' , starting_parameters = starting_parameters)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool = pool, args = [fiducial_DM_z])
pos , prob , state = sampler.run_mcmc(starting_parameters, 10)
####################################################################
# Save MCMC results and checkpoint the progress #
####################################################################
#save final position in an npz file to be read back in afterwards
np.savez('checkpoint_values_FRB_full.npz', position = pos, probability = prob, stateof = state, acceptance_frac= np.mean(sampler.acceptance_fraction))
#write out chain data to npz files
np.savez('flatchain_FRB_' +str(RUN)+ '_full.npz', betas = sampler.flatchain[:,0], zeta = sampler.flatchain[:,1] , Mturn = sampler.flatchain[:,2], Rmfp = sampler.flatchain[:,3] , acceptance_frac= np.mean(sampler.acceptance_fraction))
np.savez('chain_FRB_' + str(RUN) +'_full.npz', samples = sampler.chain)
pool.close() | #os.chdir("/home/grx40/projects/def-acliu/grx40/soft/21cmFASTM/Programs/")
#dimensions and walkers of EnsembleSampler
ndim = 4
nwalkers = 24
| random_line_split |
marker_detector.py | import numpy as np
import cv2
import imutils
from imutils import contours
import copy
class ColorLabel:
def __init__(self, area, w, h):
self.area = area
(self.width, self.height) = (w, h)
self.bgr = [0, 0, 0]
pass
def label(self):
return self.__label_square()
def __label_square(self):
for y in range(self.height):
for x in range(self.width):
self.__update_bgr(self.__max_channel(self.area[y, x]))
return self.__color()
def __color(self):
index = np.argmax(self.bgr)
if index == 0:
return "purple"
if index == 1:
return "green"
if index == 2:
return "red"
def __update_bgr(self, index):
self.bgr[index] += 1
pass
def __max_channel(self, pixel):
index = np.argmax(pixel)
return index
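#------------------------------------------------------------------
#Hedged usage sketch (added for illustration, not part of the original module):
#ColorLabel votes per pixel on the dominant BGR channel of a cropped marker
#patch; channel 0 (blue) is reported as "purple". For example:
#    patch = np.zeros((4, 4, 3), dtype = np.uint8)
#    patch[..., 1] = 255 #pure green patch
#    ColorLabel(patch, 4, 4).label() #returns "green"
#------------------------------------------------------------------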
class ImageWrapper:
def __init__(self, image, ratio=1):
self.image = image
self.output_image = copy.deepcopy(image)
self.ratio = ratio
self.height, self.width = self.image.shape[:2]
self.resized = cv2.resize(self.image, (int(self.width * ratio), int(self.height * ratio))) #cv2.resize takes dsize as (width, height)
self.blurred = cv2.GaussianBlur(self.image, (5, 5), 0)
self.hsv = cv2.cvtColor(cv2.GaussianBlur(self.image, (11, 11), 0), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def contours_color(self):
self.__contours(self.thresh)
return self.cnts
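#------------------------------------------------------------------
#Hedged usage sketch (added for illustration, not part of the original module):
#wrap a frame once and reuse its precomputed views; "frame.png" is a
#hypothetical input file.
#    frame = cv2.imread("frame.png")
#    iw = ImageWrapper(frame)
#    cnts = iw.contours_shape() #contours found on the Canny edge map
#    cv2.drawContours(iw.output_image, cnts, -1, (0, 255, 255), 2)
#------------------------------------------------------------------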
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
|
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
for elem in list(array_of_contours): #iterate over a copy so removing elements does not skip any
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
#check squres
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
if color2 is "green" or color2 is "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
if color2 is "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j]
return None, None
def check_similarity_of_two_cw(self, cw_1, cw_2):
err = 50
if abs(cw_1.cX - cw_2.cX) <= err:
if abs(cw_1.cY - cw_2.cY) <= err:
return True
return False
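#------------------------------------------------------------------
#Hedged note (added for illustration, not part of the original module):
#check_cws_array_ratios pairs an outer contour with an inner one whose area is
#roughly exp_ratio times smaller and whose centroid lies within 50 px, e.g. the
#4.5:1 square-in-square train marker. A minimal standalone sketch of the idea:
#    def find_nested_pair(blobs, exp_ratio = 4.5, tol = 1.0, max_shift = 50):
#        #blobs is a hypothetical list of (area, (cx, cy)) tuples
#        for a1, c1 in blobs:
#            for a2, c2 in blobs:
#                if a2 and abs(a1 / a2 - exp_ratio) <= tol \
#                and abs(c1[0] - c2[0]) <= max_shift and abs(c1[1] - c2[1]) <= max_shift:
#                    return (a1, c1), (a2, c2)
#        return None
#------------------------------------------------------------------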
##################################################################
##################################################################
##################################################################
#EXAMPLE OF USAGE BELOW, DELETE WHILE INTEGRATING WITH WHOLE PROJECT
#
# def video():
# cap = cv2.VideoCapture('../shapes/z_pocigami_2.avi')#('../shapes/biale_przejazd_bez_pociagow.avi')#('../shapes/biale_przejazd_z_znacznikami.avi')
# while cap.isOpened():
# ret, frame = cap.read()
#
# if not ret:
# break
#
#
# #example of usage
# shape = ShapeDetector(frame)
# shape.detect_depot()
# shape.detect_trains()
# shape.detect_platforms()
#
# cv2.imshow('frameOUT', shape.IW.output_image)
# cv2.imshow('frameOUT2', shape.IW.edged)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# pass
#
#
# def main():
# video()
# pass
#
# if __name__ == "__main__":
# main() | string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass | identifier_body |
marker_detector.py | import numpy as np
import cv2
import imutils
from imutils import contours
import copy
class ColorLabel:
def __init__(self, area, w, h):
self.area = area
(self.width, self.height) = (w, h)
self.bgr = [0, 0, 0]
pass
def label(self):
return self.__label_square()
def __label_square(self):
for y in range(self.height):
for x in range(self.width):
self.__update_bgr(self.__max_channel(self.area[y, x]))
return self.__color()
def __color(self):
index = np.argmax(self.bgr)
if index == 0:
return "purple"
if index == 1:
return "green"
if index == 2:
return "red"
def __update_bgr(self, index):
self.bgr[index] += 1
pass
def __max_channel(self, pixel):
index = np.argmax(pixel)
return index
class ImageWrapper:
def __init__(self, image, ratio=1):
self.image = image
self.output_image = copy.deepcopy(image)
self.ratio = ratio
self.height, self.width = self.image.shape[:2]
self.resized = cv2.resize(self.image, (int(self.width * ratio), int(self.height * ratio))) #cv2.resize takes dsize as (width, height)
self.blurred = cv2.GaussianBlur(self.image, (5, 5), 0)
self.hsv = cv2.cvtColor(cv2.GaussianBlur(self.image, (11, 11), 0), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def contours_color(self):
self.__contours(self.thresh)
return self.cnts
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
for elem in list(array_of_contours): #iterate over a copy so removing elements does not skip any
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
#check squres
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
if color2 is "green" or color2 is "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
if color2 is "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
|
return None, None
def check_similarity_of_two_cw(self, cw_1, cw_2):
err = 50
if abs(cw_1.cX - cw_2.cX) <= err:
if abs(cw_1.cY - cw_2.cY) <= err:
return True
return False
##################################################################
##################################################################
##################################################################
#EXAMPLE OF USAGE BELOW, DELETE WHILE INTEGRATING WITH WHOLE PROJECT
#
# def video():
# cap = cv2.VideoCapture('../shapes/z_pocigami_2.avi')#('../shapes/biale_przejazd_bez_pociagow.avi')#('../shapes/biale_przejazd_z_znacznikami.avi')
# while cap.isOpened():
# ret, frame = cap.read()
#
# if not ret:
# break
#
#
# #example of usage
# shape = ShapeDetector(frame)
# shape.detect_depot()
# shape.detect_trains()
# shape.detect_platforms()
#
# cv2.imshow('frameOUT', shape.IW.output_image)
# cv2.imshow('frameOUT2', shape.IW.edged)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# pass
#
#
# def main():
# video()
# pass
#
# if __name__ == "__main__":
# main() | for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j] | conditional_block |
marker_detector.py | import numpy as np
import cv2
import imutils
from imutils import contours
import copy
class ColorLabel:
def __init__(self, area, w, h):
self.area = area
(self.width, self.height) = (w, h)
self.bgr = [0, 0, 0]
pass
def label(self):
return self.__label_square()
def __label_square(self):
for y in range(self.height):
for x in range(self.width):
self.__update_bgr(self.__max_channel(self.area[y, x]))
return self.__color()
def __color(self):
index = np.argmax(self.bgr)
if index == 0:
return "purple"
if index == 1:
return "green"
if index == 2:
return "red"
def __update_bgr(self, index):
self.bgr[index] += 1
pass
def __max_channel(self, pixel):
index = np.argmax(pixel)
return index
class ImageWrapper:
def __init__(self, image, ratio=1):
self.image = image
self.output_image = copy.deepcopy(image)
self.ratio = ratio
self.height, self.width = self.image.shape[:2]
self.resized = cv2.resize(self.image, (int(self.width * ratio), int(self.height * ratio))) #cv2.resize takes dsize as (width, height)
self.blurred = cv2.GaussianBlur(self.image, (5, 5), 0)
self.hsv = cv2.cvtColor(cv2.GaussianBlur(self.image, (11, 11), 0), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def | (self):
self.__contours(self.thresh)
return self.cnts
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
str = "Type: %s, color: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (self.type, self.color, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
return str
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
for elem in list(array_of_contours): #iterate over a copy so removing elements does not skip any
ratio = elem.w / elem.h
if abs(ratio - 1.0) >= 0.3:
array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
#check squres
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
if color2 is "green" or color2 is "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
if color2 is "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j]
return None, None
def check_similarity_of_two_cw(self, cw_1, cw_2):
err = 50
if abs(cw_1.cX - cw_2.cX) <= err:
if abs(cw_1.cY - cw_2.cY) <= err:
return True
return False
##################################################################
##################################################################
##################################################################
#EXAMPLE OF USAGE BELOW, DELETE WHILE INTEGRATING WITH WHOLE PROJECT
#
# def video():
# cap = cv2.VideoCapture('../shapes/z_pocigami_2.avi')#('../shapes/biale_przejazd_bez_pociagow.avi')#('../shapes/biale_przejazd_z_znacznikami.avi')
# while cap.isOpened():
# ret, frame = cap.read()
#
# if not ret:
# break
#
#
# #example of usage
# shape = ShapeDetector(frame)
# shape.detect_depot()
# shape.detect_trains()
# shape.detect_platforms()
#
# cv2.imshow('frameOUT', shape.IW.output_image)
# cv2.imshow('frameOUT2', shape.IW.edged)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# pass
#
#
# def main():
# video()
# pass
#
# if __name__ == "__main__":
# main() | contours_color | identifier_name |
marker_detector.py | import numpy as np
import cv2
import imutils
from imutils import contours
import copy
class ColorLabel:
def __init__(self, area, w, h):
self.area = area
(self.width, self.height) = (w, h)
self.bgr = [0, 0, 0]
pass
def label(self):
return self.__label_square()
def __label_square(self):
for y in range(self.height):
for x in range(self.width):
self.__update_bgr(self.__max_channel(self.area[y, x]))
return self.__color()
def __color(self):
index = np.argmax(self.bgr)
if index == 0:
return "purple"
if index == 1:
return "green"
if index == 2:
return "red"
def __update_bgr(self, index):
self.bgr[index] += 1
pass
def __max_channel(self, pixel):
index = np.argmax(pixel)
return index
class ImageWrapper:
def __init__(self, image, ratio=1):
self.image = image
self.output_image = copy.deepcopy(image)
self.ratio = ratio
self.height, self.width = self.image.shape[:2]
self.resized = cv2.resize(self.image, (int(self.width * ratio), int(self.height * ratio))) #cv2.resize takes dsize as (width, height)
self.blurred = cv2.GaussianBlur(self.image, (5, 5), 0)
self.hsv = cv2.cvtColor(cv2.GaussianBlur(self.image, (11, 11), 0), cv2.COLOR_BGR2HSV)
self.gray = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2GRAY)
self.edged = cv2.Canny(self.blurred, 50, 150)
self.lab = cv2.cvtColor(self.blurred, cv2.COLOR_BGR2LAB)
self.thresh = cv2.threshold(self.gray, 60, 255, cv2.THRESH_BINARY)[1]
self.cnts = None
def __contours(self, image):
self.cnts = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.cnts = self.cnts[0] if imutils.is_cv2() else self.cnts[1]
def contours_shape(self):
self.__contours(self.edged)
return self.cnts
def contours_color(self):
self.__contours(self.thresh)
return self.cnts
class ContourWrapper:
def __init__(self, contour, ratio=1):
self.contour = contour
self.ratio = ratio
self.peri = cv2.arcLength(self.contour, True)
self.approx = cv2.approxPolyDP(self.contour, 0.04 * self.peri, True)
self.M = cv2.moments(self.contour)
(self.x, self.y, self.w, self.h) = cv2.boundingRect(self.approx)
self.bounding_rect = (self.y, self.x, self.w, self.h)
((self.x_mnc, self.y_mnc), self.radius) = cv2.minEnclosingCircle(contour)
self.area = cv2.contourArea(self.contour)
# cX and cY are center of mass of contour
self.cX, self.cY = self.__get_cx_cy()
def __get_cx_cy(self):
cx = 0
cy = 0
if self.M["m00"] > 0:
cx = int((self.M["m10"] / self.M["m00"]) * self.ratio)
cy = int((self.M["m01"] / self.M["m00"]) * self.ratio)
return cx, cy
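# Illustrative sketch (not part of the original detector): wrapping every contour found by
# ImageWrapper in a ContourWrapper to read off its centroid and area. The file path is a
# hypothetical placeholder.
def _example_contour_features(image_path='frame.png'):
    image = cv2.imread(image_path)
    if image is None:
        return []
    iw = ImageWrapper(image)
    features = []
    for c in iw.contours_shape():
        cw = ContourWrapper(c)
        features.append((cw.cX, cw.cY, cw.area))  # centre of mass and contour area
    return features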
class GraphicsUtils:
def __init__(self):
pass
def draw_station_status(self, image, text):
string = "Station: " + text
cv2.putText(image, string, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_train_status(self, image, idx):
string = "Train: " + str(idx)
cv2.putText(image, string, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
pass
def draw_crosshair(self, image, shape):
(startX, endX) = (int(shape.centerX - (shape.w * 0.15)), int(shape.centerX + (shape.w * 0.15)))
(startY, endY) = (int(shape.centerY - (shape.h * 0.15)), int(shape.centerY + (shape.h * 0.15)))
cv2.line(image, (startX, shape.centerY), (endX, shape.centerY), (0, 0, 255), 3)
cv2.line(image, (shape.centerX, startY), (shape.centerX, endY), (0, 0, 255), 3)
pass
def draw_contour(self, image, approx):
cv2.drawContours(image, [approx], -1, (0, 255, 255), 4)
pass
square_str = "square"
triangle_str = "triangle"
class Shape:
def __init__(self, type="", area=0, center_x=0, center_y=0, x=0, y=0, w=0, h=0):
self.type = type
self.contour = None
self.area = area
self.centerX = center_x
self.centerY = center_y
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def __if_type(self, type1, type2):
return type1 == type2
def set_contour(self, contour):
self.contour = contour
pass
def set_center(self, c_x, c_y):
self.centerX = c_x
self.centerY = c_y
def set_type(self, approx):
        if approx == 4:
self.type = square_str
elif approx == 3:
self.type = triangle_str
else:
self.type = "unknown"
pass
def set_size(self, x, y, w, h):
(self.x, self.y, self.w, self.h) = (x, y, w, h)
pass
def is_square(self):
if self.__if_type(self.type, square_str):
return True
return False
def is_triangle(self):
if self.__if_type(self.type, triangle_str):
return True
return False
def is_area_higer_than(self, value):
return self.area >= value
def is_area_lower_than(self, value):
return self.area <= value
def __str__(self):
        # Shape has no colour attribute, so only the known fields are reported
        return "Type: %s, area: %d, center(x,y): %d, %d, size(x,y,w,h): %d, %d, %d, %d" % (
            self.type, self.area, self.centerX, self.centerY, self.x, self.y, self.w, self.h)
class ShapeDetector:
def __init__(self, image):
self.IW = ImageWrapper(image)
self.shape = Shape()
self.detected = False
self.stations = {'red': 'zajezdnia', 'green': 'strzyza', 'purple': 'kieplinek'}
self.trains = {'red': 6, 'green': 2, 'purple': 1}
pass
def detect_trains(self):
return self.__detect(trains=True)
def detect_platforms(self):
return self.__detect(platforms=True)
def detect_depot(self):
return self.__detect(depot=True)
def __detect(self, platforms=False, depot=False, trains=False):
self.detected = False
output = {"train": 0 , "platform": None}
array_of_contours = []
GU = GraphicsUtils()
for c in self.IW.contours_shape():
CW = ContourWrapper(c)
self.shape.set_type(len(CW.approx))
self.shape.area = CW.area
self.shape.set_contour(CW.contour)
if self.shape.is_square():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
if self.shape.is_triangle():
if self.shape.is_area_higer_than(200):
array_of_contours = self.add_cw_to_similarity_array(array_of_contours, CW)
#
# for i in range(len(array_of_contours)):
# print(i)
# ratio = abs(array_of_contours[i].w / array_of_contours[i].h)
# print(str(array_of_contours[i].w) + ', ' + str(array_of_contours[i].h) + ', ' + str(ratio))
# if abs(ratio - 1.0) >= 0.3:
# print(abs(ratio - 1.0))
# array_of_contours.pop(i)
# print('usunieto')
# i -= 1
        # Iterate over a copy: removing items from the list being iterated would skip elements
        for elem in list(array_of_contours):
            ratio = elem.w / elem.h
            if abs(ratio - 1.0) >= 0.3:
                array_of_contours.remove(elem)
if len(array_of_contours) >= 2:
if trains is True:
                # check squares
a, b = self.check_cws_array_ratios(array_of_contours, 4.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
GU.draw_train_status(self.IW.output_image, str(self.trains[color2]) + ", " + color2)
output["train"] = self.trains[color2]
if platforms is True or depot is True:
#check triangles
a, b = self.check_cws_array_ratios(array_of_contours, 8.5, 1)
if a is None and b is None:
pass
else:
self.shape.set_center(b.cX, b.cY)
self.shape.set_size(b.x, b.y, b.w, b.h)
cl2 = ColorLabel(self.IW.image[b.y:b.y + b.h, b.x:b.x + b.w], b.w, b.h)
color2 = cl2.label()
if platforms is True:
                        if color2 == "green" or color2 == "purple":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
if depot is True:
                        if color2 == "red":
GU.draw_station_status(self.IW.output_image, self.stations[color2] + ", " + color2)
GU.draw_contour(self.IW.output_image, a.approx)
GU.draw_contour(self.IW.output_image, b.approx)
GU.draw_crosshair(self.IW.output_image, self.shape)
output["platform"] = self.stations[color2]
return output
def add_cw_to_similarity_array(self, cnts_array, CW):
for cnt in cnts_array:
if cnt.cX == CW.cX and cnt.cY == CW.cY:
if 0.95 <= (cnt.area/CW.area) <= 1.05:
return cnts_array
cnts_array.append(CW)
return cnts_array
def check_cws_array_ratios(self, cnts_array, exp_ratio, error):
expected_ratio = exp_ratio
err = error
ratio = 0
for i in range(0, len(cnts_array)):
for j in range(0, len(cnts_array)):
if cnts_array[j].area != 0:
ratio = cnts_array[i].area / cnts_array[j].area
if abs(ratio-expected_ratio) <= err and self.check_similarity_of_two_cw(cnts_array[i], cnts_array[j]):
return cnts_array[i], cnts_array[j]
return None, None
def check_similarity_of_two_cw(self, cw_1, cw_2):
err = 50
if abs(cw_1.cX - cw_2.cX) <= err:
if abs(cw_1.cY - cw_2.cY) <= err:
return True
return False
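    # Worked example of the matching heuristic above (illustrative numbers, not measured data):
    # detect_trains() calls check_cws_array_ratios(..., exp_ratio=4.5, error=1), so an outer
    # square of area 900 px² and an inner square of area 200 px² give 900 / 200 = 4.5, which
    # lies within 4.5 ± 1; if their centres of mass also lie within 50 px of each other (the
    # err used in check_similarity_of_two_cw), the pair is accepted as one marker.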
##################################################################
##################################################################
##################################################################
#EXAMPLE OF USAGE BELOW, DELETE WHILE INTEGRATING WITH WHOLE PROJECT
#
# def video():
# cap = cv2.VideoCapture('../shapes/z_pocigami_2.avi')#('../shapes/biale_przejazd_bez_pociagow.avi')#('../shapes/biale_przejazd_z_znacznikami.avi')
# while cap.isOpened():
# ret, frame = cap.read()
#
# if not ret:
# break
#
#
# #example of usage
# shape = ShapeDetector(frame)
# shape.detect_depot()
# shape.detect_trains()
# shape.detect_platforms()
#
# cv2.imshow('frameOUT', shape.IW.output_image)
# cv2.imshow('frameOUT2', shape.IW.edged)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# pass | # def main():
# video()
# pass
#
# if __name__ == "__main__":
# main() | #
# | random_line_split |
application.py | from flask import Flask, render_template, jsonify, request, make_response #BSD License
|
#StdLibs
import json
from os import path
import csv
####################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
####################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
    #POST method: responsible for fetching the data in JSON format from the server.
    #The server expects a "data" field containing the name of a file that exists on the server in the /static/jsons/ folder.
    #If the file is not found it returns a 404.
    #If the "data" field is missing it returns a 400.
    #(A hedged client example is sketched right after this route.)
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
        #GET method:
        #expects a "graph" field containing one of the names listed below.
        #For "mf" and "emig" it also expects a second field that specifies
        #the university or the province.
        #Moreover, "iscrittiAtn" and "mf" MAY (but do not have to) carry
        #an extra field that filters the data by a specific year or a specific sex.
if 'graph' in request.args:
            # Horizontal bar graph for the provincial hourly wage by education level
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
            # Line graph for university enrolment in Veneto by year
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
            # Donut graph for the male/female distribution in Veneto universities
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
            # Polar area graph for students who moved to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
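# Illustrative client sketch (not part of the application): how /getGraph is meant to be queried
# once the server is running on port 80 (see application.run below). "retribuzioneMedia" is one of
# the JSON files written by updateData(); the "atn" and "year" values are hypothetical examples.
# Uses the "requests" package, which this project already imports elsewhere.
def _example_client(base_url='http://localhost:80'):
    import requests
    # POST: fetch the raw JSON dataset named by the "data" form field
    wages = requests.post(base_url + '/getGraph', data={'data': 'retribuzioneMedia'}).json()
    # GET: fetch the rendered HTML page for a specific graph
    page = requests.get(base_url + '/getGraph', params={'graph': 'mf', 'atn': 'Padova', 'year': 2015})
    return wages, page.status_code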
#To update the datasets:
#Because of an error made by the MIUR when generating the file on enrolment per university,
#that file cannot be downloaded dynamically and has to be replaced manually.
#Likewise, the data obtained from ISTAT cannot be downloaded dynamically through the API,
#since its performance is limited (and it does not allow the filters needed to obtain the files).
#The provinces dataset is updated automatically every week. The others have to be replaced manually.
#Static datasets go in the /static/notUpdating/ folder.
#The enrolment-per-university dataset must be downloaded from this link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv
#The dataset on students who moved out of the region was built manually from other data and cannot be updated.
#The datasets on the unemployment rate and the average hourly wage are available on the portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases the results must be filtered down to the Veneto provinces only.
#The files must be renamed retribuzioneMedia.csv and taxDisocc.csv
#Fortunately, they only change once a year.
#(An illustrative download helper is sketched right after this comment block.)
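# Illustrative sketch (not part of the application): manually refreshing the static
# enrolment-per-university CSV described above. The URL and target path come from the comment
# block; this helper is meant to be run by hand and is not wired into any route.
def _download_iscritti_ateneo():
    import requests
    url = ('http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/'
           'resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv')
    resp = requests.get(url, allow_redirects=True)
    with open('static/notUpdating/iscrittiAteneo.csv', 'wb') as f:
        f.write(resp.content)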
@application.route("/doUpdate")
def updateData():
    #Enrolment-per-university file
    #The data is stored in a dictionary as arrays; the format is documented further down
    with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:  #The file name can be changed here if necessary, as long as it is a properly formatted CSV
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
    # Format: {"universityName" : ["academicYear;maleEnrolled;femaleEnrolled",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
    # File of students who enrolled in universities outside the region
    with open('static/notUpdating/iscrittiEmig.json', newline='') as f:  #The file name can be changed here if necessary, as long as the format stays the same
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
    # Format: {"cityInLowercase" : ["academicYear;CityOfOriginInUppercase;RegionOfDestination;numberOfStudents",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
    # Average hourly wage by education level file
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
                # The list is split into education level and average hourly income
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
    # Format: {"cityName" : ["laurea;average", "diploma;average", "nessuno;average"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
    # Unemployment-rate file
    with open('static/notUpdating/taxDisocc.csv', newline='') as f:  #The file name can be changed here if necessary, as long as it is a properly formatted CSV
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
    # Format: {"cityName" : ["year;malePct;femalePct","year;malePct;femalePct"],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
    # Total-enrolment-per-province file
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
    open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)  #The file name can be changed here if necessary, as long as it is a properly formatted CSV
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
    # Format: {"cityName" : ["year;count"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
#########
#Startup#
#########
#On every forced restart of the application the data is refreshed (it takes a few seconds at most)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80) | import requests #Apache 2.0
| random_line_split |