repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
gravitational/teleport | lib/events/filelog.go | CheckAndSetDefaults | func (cfg *FileLogConfig) CheckAndSetDefaults() error {
if cfg.Dir == "" {
return trace.BadParameter("missing parameter Dir")
}
if !utils.IsDir(cfg.Dir) {
return trace.BadParameter("path %q does not exist or is not a directory", cfg.Dir)
}
if cfg.SymlinkDir == "" {
cfg.SymlinkDir = cfg.Dir
}
if !utils.IsDir(cfg.SymlinkDir) {
return trace.BadParameter("path %q does not exist or is not a directory", cfg.SymlinkDir)
}
if cfg.RotationPeriod == 0 {
cfg.RotationPeriod = defaults.LogRotationPeriod
}
if cfg.RotationPeriod%(24*time.Hour) != 0 {
return trace.BadParameter("rotation period %v is not a multiple of 24 hours, e.g. '24h' or '48h'", cfg.RotationPeriod)
}
if cfg.Clock == nil {
cfg.Clock = clockwork.NewRealClock()
}
if cfg.UIDGenerator == nil {
cfg.UIDGenerator = utils.NewRealUID()
}
return nil
} | go | func (cfg *FileLogConfig) CheckAndSetDefaults() error {
if cfg.Dir == "" {
return trace.BadParameter("missing parameter Dir")
}
if !utils.IsDir(cfg.Dir) {
return trace.BadParameter("path %q does not exist or is not a directory", cfg.Dir)
}
if cfg.SymlinkDir == "" {
cfg.SymlinkDir = cfg.Dir
}
if !utils.IsDir(cfg.SymlinkDir) {
return trace.BadParameter("path %q does not exist or is not a directory", cfg.SymlinkDir)
}
if cfg.RotationPeriod == 0 {
cfg.RotationPeriod = defaults.LogRotationPeriod
}
if cfg.RotationPeriod%(24*time.Hour) != 0 {
return trace.BadParameter("rotation period %v is not a multiple of 24 hours, e.g. '24h' or '48h'", cfg.RotationPeriod)
}
if cfg.Clock == nil {
cfg.Clock = clockwork.NewRealClock()
}
if cfg.UIDGenerator == nil {
cfg.UIDGenerator = utils.NewRealUID()
}
return nil
} | [
"func",
"(",
"cfg",
"*",
"FileLogConfig",
")",
"CheckAndSetDefaults",
"(",
")",
"error",
"{",
"if",
"cfg",
".",
"Dir",
"==",
"\"",
"\"",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"!",
"utils",
".",
"IsDir",
"(",
"cfg",
".",
"Dir",
")",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"cfg",
".",
"Dir",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"SymlinkDir",
"==",
"\"",
"\"",
"{",
"cfg",
".",
"SymlinkDir",
"=",
"cfg",
".",
"Dir",
"\n",
"}",
"\n",
"if",
"!",
"utils",
".",
"IsDir",
"(",
"cfg",
".",
"SymlinkDir",
")",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"cfg",
".",
"SymlinkDir",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"RotationPeriod",
"==",
"0",
"{",
"cfg",
".",
"RotationPeriod",
"=",
"defaults",
".",
"LogRotationPeriod",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"RotationPeriod",
"%",
"(",
"24",
"*",
"time",
".",
"Hour",
")",
"!=",
"0",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"cfg",
".",
"RotationPeriod",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"Clock",
"==",
"nil",
"{",
"cfg",
".",
"Clock",
"=",
"clockwork",
".",
"NewRealClock",
"(",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"UIDGenerator",
"==",
"nil",
"{",
"cfg",
".",
"UIDGenerator",
"=",
"utils",
".",
"NewRealUID",
"(",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // CheckAndSetDefaults checks and sets config defaults | [
"CheckAndSetDefaults",
"checks",
"and",
"sets",
"config",
"defaults"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L60-L86 | train |
gravitational/teleport | lib/events/filelog.go | NewFileLog | func NewFileLog(cfg FileLogConfig) (*FileLog, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
f := &FileLog{
FileLogConfig: cfg,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentAuditLog,
}),
}
return f, nil
} | go | func NewFileLog(cfg FileLogConfig) (*FileLog, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
f := &FileLog{
FileLogConfig: cfg,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentAuditLog,
}),
}
return f, nil
} | [
"func",
"NewFileLog",
"(",
"cfg",
"FileLogConfig",
")",
"(",
"*",
"FileLog",
",",
"error",
")",
"{",
"if",
"err",
":=",
"cfg",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"f",
":=",
"&",
"FileLog",
"{",
"FileLogConfig",
":",
"cfg",
",",
"Entry",
":",
"log",
".",
"WithFields",
"(",
"log",
".",
"Fields",
"{",
"trace",
".",
"Component",
":",
"teleport",
".",
"ComponentAuditLog",
",",
"}",
")",
",",
"}",
"\n",
"return",
"f",
",",
"nil",
"\n",
"}"
] | // NewFileLog returns a new instance of a file log | [
"NewFileLog",
"returns",
"a",
"new",
"instance",
"of",
"a",
"file",
"log"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L89-L100 | train |
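A minimal usage sketch for the constructor above. The import path for `lib/events` and the log directory are assumptions; the config fields (`Dir`, `SymlinkDir`, `RotationPeriod`) come from the `CheckAndSetDefaults` row above, which `NewFileLog` calls before returning.

```go
package main

import (
	"fmt"

	"github.com/gravitational/teleport/lib/events" // assumed import path for lib/events
)

func main() {
	cfg := events.FileLogConfig{
		Dir: "/var/lib/teleport/log", // hypothetical path; must already exist and be a directory
	}
	fileLog, err := events.NewFileLog(cfg)
	if err != nil {
		fmt.Println("config rejected by CheckAndSetDefaults:", err)
		return
	}
	defer fileLog.Close()
	fmt.Println("file log ready; SymlinkDir defaulted to:", cfg.SymlinkDir)
}
```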
gravitational/teleport | lib/events/filelog.go | EmitAuditEvent | func (l *FileLog) EmitAuditEvent(event Event, fields EventFields) error {
// see if the log needs to be rotated
err := l.rotateLog()
if err != nil {
log.Error(err)
}
err = UpdateEventFields(event, fields, l.Clock, l.UIDGenerator)
if err != nil {
log.Error(err)
}
// line is the text to be logged
line, err := json.Marshal(fields)
if err != nil {
return trace.Wrap(err)
}
// log it to the main log file:
if l.file != nil {
fmt.Fprintln(l.file, string(line))
}
return nil
} | go | func (l *FileLog) EmitAuditEvent(event Event, fields EventFields) error {
// see if the log needs to be rotated
err := l.rotateLog()
if err != nil {
log.Error(err)
}
err = UpdateEventFields(event, fields, l.Clock, l.UIDGenerator)
if err != nil {
log.Error(err)
}
// line is the text to be logged
line, err := json.Marshal(fields)
if err != nil {
return trace.Wrap(err)
}
// log it to the main log file:
if l.file != nil {
fmt.Fprintln(l.file, string(line))
}
return nil
} | [
"func",
"(",
"l",
"*",
"FileLog",
")",
"EmitAuditEvent",
"(",
"event",
"Event",
",",
"fields",
"EventFields",
")",
"error",
"{",
"// see if the log needs to be rotated",
"err",
":=",
"l",
".",
"rotateLog",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Error",
"(",
"err",
")",
"\n",
"}",
"\n",
"err",
"=",
"UpdateEventFields",
"(",
"event",
",",
"fields",
",",
"l",
".",
"Clock",
",",
"l",
".",
"UIDGenerator",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Error",
"(",
"err",
")",
"\n",
"}",
"\n",
"// line is the text to be logged",
"line",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"fields",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"// log it to the main log file:",
"if",
"l",
".",
"file",
"!=",
"nil",
"{",
"fmt",
".",
"Fprintln",
"(",
"l",
".",
"file",
",",
"string",
"(",
"line",
")",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // EmitAuditEvent adds a new event to the log. Part of auth.IFileLog interface. | [
"EmitAuditEvent",
"adds",
"a",
"new",
"event",
"to",
"the",
"log",
".",
"Part",
"of",
"auth",
".",
"IFileLog",
"interface",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L117-L137 | train |
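A hedged sketch of the call shape for `EmitAuditEvent`. The concrete `Event` and `EventFields` values are not defined anywhere in this dump, so they are left to the caller; only the behaviour visible in the row above (rotation and field defaulting happen inside, and only a JSON-marshalling failure is returned) is relied on.

```go
package auditexample

import "github.com/gravitational/teleport/lib/events" // assumed import path for lib/events

// emitExample shows only the call shape; evt and fields must be produced by the
// caller because their construction is not part of this dump.
func emitExample(fileLog *events.FileLog, evt events.Event, fields events.EventFields) error {
	// Rotation and field defaulting happen inside EmitAuditEvent; only a
	// json.Marshal failure is propagated, as the row above shows.
	return fileLog.EmitAuditEvent(evt, fields)
}
```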
gravitational/teleport | lib/events/filelog.go | Close | func (l *FileLog) Close() error {
l.Lock()
defer l.Unlock()
var err error
if l.file != nil {
err = l.file.Close()
l.file = nil
}
return err
} | go | func (l *FileLog) Close() error {
l.Lock()
defer l.Unlock()
var err error
if l.file != nil {
err = l.file.Close()
l.file = nil
}
return err
} | [
"func",
"(",
"l",
"*",
"FileLog",
")",
"Close",
"(",
")",
"error",
"{",
"l",
".",
"Lock",
"(",
")",
"\n",
"defer",
"l",
".",
"Unlock",
"(",
")",
"\n\n",
"var",
"err",
"error",
"\n",
"if",
"l",
".",
"file",
"!=",
"nil",
"{",
"err",
"=",
"l",
".",
"file",
".",
"Close",
"(",
")",
"\n",
"l",
".",
"file",
"=",
"nil",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // Close closes the audit log, which includes closing all file handles and releasing
// all session loggers | [
"Close",
"closes",
"the",
"audit",
"log",
"which",
"includes",
"closing",
"all",
"file",
"handles",
"and",
"releasing",
"all",
"session",
"loggers"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L230-L240 | train |
gravitational/teleport | lib/events/filelog.go | rotateLog | func (l *FileLog) rotateLog() (err error) {
l.Lock()
defer l.Unlock()
// determine the timestamp for the current log file
fileTime := l.Clock.Now().In(time.UTC)
// truncate time to the resolution of one day, cutting at the day end boundary
fileTime = time.Date(fileTime.Year(), fileTime.Month(), fileTime.Day(), 0, 0, 0, 0, time.UTC)
logFilename := filepath.Join(l.Dir,
fileTime.Format(defaults.AuditLogTimeFormat)+LogfileExt)
openLogFile := func() error {
l.file, err = os.OpenFile(logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Error(err)
}
l.fileTime = fileTime
return trace.Wrap(err)
}
linkFilename := filepath.Join(l.SymlinkDir, SymlinkFilename)
createSymlink := func() error {
err = trace.ConvertSystemError(os.Remove(linkFilename))
if err != nil {
if !trace.IsNotFound(err) {
return trace.Wrap(err)
}
}
return trace.ConvertSystemError(os.Symlink(logFilename, linkFilename))
}
// need to create a log file?
if l.file == nil {
if err := openLogFile(); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(createSymlink())
}
// time to advance the logfile?
if l.fileTime.Before(fileTime) {
l.file.Close()
if err := openLogFile(); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(createSymlink())
}
return nil
} | go | func (l *FileLog) rotateLog() (err error) {
l.Lock()
defer l.Unlock()
// determine the timestamp for the current log file
fileTime := l.Clock.Now().In(time.UTC)
// truncate time to the resolution of one day, cutting at the day end boundary
fileTime = time.Date(fileTime.Year(), fileTime.Month(), fileTime.Day(), 0, 0, 0, 0, time.UTC)
logFilename := filepath.Join(l.Dir,
fileTime.Format(defaults.AuditLogTimeFormat)+LogfileExt)
openLogFile := func() error {
l.file, err = os.OpenFile(logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
if err != nil {
log.Error(err)
}
l.fileTime = fileTime
return trace.Wrap(err)
}
linkFilename := filepath.Join(l.SymlinkDir, SymlinkFilename)
createSymlink := func() error {
err = trace.ConvertSystemError(os.Remove(linkFilename))
if err != nil {
if !trace.IsNotFound(err) {
return trace.Wrap(err)
}
}
return trace.ConvertSystemError(os.Symlink(logFilename, linkFilename))
}
// need to create a log file?
if l.file == nil {
if err := openLogFile(); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(createSymlink())
}
// time to advance the logfile?
if l.fileTime.Before(fileTime) {
l.file.Close()
if err := openLogFile(); err != nil {
return trace.Wrap(err)
}
return trace.Wrap(createSymlink())
}
return nil
} | [
"func",
"(",
"l",
"*",
"FileLog",
")",
"rotateLog",
"(",
")",
"(",
"err",
"error",
")",
"{",
"l",
".",
"Lock",
"(",
")",
"\n",
"defer",
"l",
".",
"Unlock",
"(",
")",
"\n\n",
"// determine the timestamp for the current log file",
"fileTime",
":=",
"l",
".",
"Clock",
".",
"Now",
"(",
")",
".",
"In",
"(",
"time",
".",
"UTC",
")",
"\n\n",
"// truncate time to the resolution of one day, cutting at the day end boundary",
"fileTime",
"=",
"time",
".",
"Date",
"(",
"fileTime",
".",
"Year",
"(",
")",
",",
"fileTime",
".",
"Month",
"(",
")",
",",
"fileTime",
".",
"Day",
"(",
")",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"time",
".",
"UTC",
")",
"\n\n",
"logFilename",
":=",
"filepath",
".",
"Join",
"(",
"l",
".",
"Dir",
",",
"fileTime",
".",
"Format",
"(",
"defaults",
".",
"AuditLogTimeFormat",
")",
"+",
"LogfileExt",
")",
"\n\n",
"openLogFile",
":=",
"func",
"(",
")",
"error",
"{",
"l",
".",
"file",
",",
"err",
"=",
"os",
".",
"OpenFile",
"(",
"logFilename",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_CREATE",
"|",
"os",
".",
"O_APPEND",
",",
"0640",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"log",
".",
"Error",
"(",
"err",
")",
"\n",
"}",
"\n",
"l",
".",
"fileTime",
"=",
"fileTime",
"\n",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"linkFilename",
":=",
"filepath",
".",
"Join",
"(",
"l",
".",
"SymlinkDir",
",",
"SymlinkFilename",
")",
"\n",
"createSymlink",
":=",
"func",
"(",
")",
"error",
"{",
"err",
"=",
"trace",
".",
"ConvertSystemError",
"(",
"os",
".",
"Remove",
"(",
"linkFilename",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"if",
"!",
"trace",
".",
"IsNotFound",
"(",
"err",
")",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"os",
".",
"Symlink",
"(",
"logFilename",
",",
"linkFilename",
")",
")",
"\n",
"}",
"\n\n",
"// need to create a log file?",
"if",
"l",
".",
"file",
"==",
"nil",
"{",
"if",
"err",
":=",
"openLogFile",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"trace",
".",
"Wrap",
"(",
"createSymlink",
"(",
")",
")",
"\n",
"}",
"\n\n",
"// time to advance the logfile?",
"if",
"l",
".",
"fileTime",
".",
"Before",
"(",
"fileTime",
")",
"{",
"l",
".",
"file",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
":=",
"openLogFile",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"trace",
".",
"Wrap",
"(",
"createSymlink",
"(",
")",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // rotateLog checks if the current log file is older than a given duration,
// and if it is, closes it and opens a new one. | [
"rotateLog",
"checks",
"if",
"the",
"current",
"log",
"file",
"is",
"older",
"than",
"a",
"given",
"duration",
"and",
"if",
"it",
"is",
"closes",
"it",
"and",
"opens",
"a",
"new",
"one",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L292-L342 | train |
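`rotateLog` names each file after the UTC day on which it was opened. The truncation step can be shown in isolation; the actual layout constant (`defaults.AuditLogTimeFormat`) and the `LogfileExt` value are not visible in this dump, so placeholders are used.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2019, time.March, 20, 17, 45, 3, 0, time.UTC)
	// the same day-boundary truncation rotateLog performs before naming the file
	day := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)

	const layout = "2006-01-02.15:04:05"     // placeholder; the real code uses defaults.AuditLogTimeFormat
	fmt.Println(day.Format(layout) + ".log") // ".log" is an assumed value for LogfileExt
}
```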
gravitational/teleport | lib/events/filelog.go | matchingFiles | func (l *FileLog) matchingFiles(fromUTC, toUTC time.Time) ([]eventFile, error) {
var dirs []string
var err error
if l.SearchDirs != nil {
dirs, err = l.SearchDirs()
if err != nil {
return nil, trace.Wrap(err)
}
} else {
dirs = []string{l.Dir}
}
var filtered []eventFile
for _, dir := range dirs {
// scan the log directory:
df, err := os.Open(dir)
if err != nil {
return nil, trace.Wrap(err)
}
defer df.Close()
entries, err := df.Readdir(-1)
if err != nil {
return nil, trace.Wrap(err)
}
for i := range entries {
fi := entries[i]
if fi.IsDir() || filepath.Ext(fi.Name()) != LogfileExt {
continue
}
fd, err := parseFileTime(fi.Name())
if err != nil {
l.Warningf("Failed to parse audit log file %q format: %v", fi.Name(), err)
continue
}
// File rounding in current logs is non-deterministic,
// as Round function used in rotateLog can round up to the lowest
// or the highest period. That's why this has to check both
// periods.
// Previous logic used modification time what was flaky
// as it could be changed by migrations or simply moving files
if fd.After(fromUTC.Add(-1*l.RotationPeriod)) && fd.Before(toUTC.Add(l.RotationPeriod)) {
eventFile := eventFile{
FileInfo: fi,
path: filepath.Join(dir, fi.Name()),
}
filtered = append(filtered, eventFile)
}
}
}
// sort all accepted files by date
sort.Sort(byDate(filtered))
return filtered, nil
} | go | func (l *FileLog) matchingFiles(fromUTC, toUTC time.Time) ([]eventFile, error) {
var dirs []string
var err error
if l.SearchDirs != nil {
dirs, err = l.SearchDirs()
if err != nil {
return nil, trace.Wrap(err)
}
} else {
dirs = []string{l.Dir}
}
var filtered []eventFile
for _, dir := range dirs {
// scan the log directory:
df, err := os.Open(dir)
if err != nil {
return nil, trace.Wrap(err)
}
defer df.Close()
entries, err := df.Readdir(-1)
if err != nil {
return nil, trace.Wrap(err)
}
for i := range entries {
fi := entries[i]
if fi.IsDir() || filepath.Ext(fi.Name()) != LogfileExt {
continue
}
fd, err := parseFileTime(fi.Name())
if err != nil {
l.Warningf("Failed to parse audit log file %q format: %v", fi.Name(), err)
continue
}
// File rounding in current logs is non-deterministic,
// as Round function used in rotateLog can round up to the lowest
// or the highest period. That's why this has to check both
// periods.
// Previous logic used modification time what was flaky
// as it could be changed by migrations or simply moving files
if fd.After(fromUTC.Add(-1*l.RotationPeriod)) && fd.Before(toUTC.Add(l.RotationPeriod)) {
eventFile := eventFile{
FileInfo: fi,
path: filepath.Join(dir, fi.Name()),
}
filtered = append(filtered, eventFile)
}
}
}
// sort all accepted files by date
sort.Sort(byDate(filtered))
return filtered, nil
} | [
"func",
"(",
"l",
"*",
"FileLog",
")",
"matchingFiles",
"(",
"fromUTC",
",",
"toUTC",
"time",
".",
"Time",
")",
"(",
"[",
"]",
"eventFile",
",",
"error",
")",
"{",
"var",
"dirs",
"[",
"]",
"string",
"\n",
"var",
"err",
"error",
"\n",
"if",
"l",
".",
"SearchDirs",
"!=",
"nil",
"{",
"dirs",
",",
"err",
"=",
"l",
".",
"SearchDirs",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"dirs",
"=",
"[",
"]",
"string",
"{",
"l",
".",
"Dir",
"}",
"\n",
"}",
"\n\n",
"var",
"filtered",
"[",
"]",
"eventFile",
"\n",
"for",
"_",
",",
"dir",
":=",
"range",
"dirs",
"{",
"// scan the log directory:",
"df",
",",
"err",
":=",
"os",
".",
"Open",
"(",
"dir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"defer",
"df",
".",
"Close",
"(",
")",
"\n",
"entries",
",",
"err",
":=",
"df",
".",
"Readdir",
"(",
"-",
"1",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"for",
"i",
":=",
"range",
"entries",
"{",
"fi",
":=",
"entries",
"[",
"i",
"]",
"\n",
"if",
"fi",
".",
"IsDir",
"(",
")",
"||",
"filepath",
".",
"Ext",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"!=",
"LogfileExt",
"{",
"continue",
"\n",
"}",
"\n",
"fd",
",",
"err",
":=",
"parseFileTime",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"l",
".",
"Warningf",
"(",
"\"",
"\"",
",",
"fi",
".",
"Name",
"(",
")",
",",
"err",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"// File rounding in current logs is non-deterministic,",
"// as Round function used in rotateLog can round up to the lowest",
"// or the highest period. That's why this has to check both",
"// periods.",
"// Previous logic used modification time what was flaky",
"// as it could be changed by migrations or simply moving files",
"if",
"fd",
".",
"After",
"(",
"fromUTC",
".",
"Add",
"(",
"-",
"1",
"*",
"l",
".",
"RotationPeriod",
")",
")",
"&&",
"fd",
".",
"Before",
"(",
"toUTC",
".",
"Add",
"(",
"l",
".",
"RotationPeriod",
")",
")",
"{",
"eventFile",
":=",
"eventFile",
"{",
"FileInfo",
":",
"fi",
",",
"path",
":",
"filepath",
".",
"Join",
"(",
"dir",
",",
"fi",
".",
"Name",
"(",
")",
")",
",",
"}",
"\n",
"filtered",
"=",
"append",
"(",
"filtered",
",",
"eventFile",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"// sort all accepted files by date",
"sort",
".",
"Sort",
"(",
"byDate",
"(",
"filtered",
")",
")",
"\n",
"return",
"filtered",
",",
"nil",
"\n",
"}"
] | // matchingFiles returns files matching the time restrictions of the query
// across multiple auth servers, returns a list of file names | [
"matchingFiles",
"returns",
"files",
"matching",
"the",
"time",
"restrictions",
"of",
"the",
"query",
"across",
"multiple",
"auth",
"servers",
"returns",
"a",
"list",
"of",
"file",
"names"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L346-L398 | train |
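Because filenames only encode the day a file was opened, `matchingFiles` widens the requested window by one `RotationPeriod` on each side before comparing, as its comment explains. A small standalone illustration of that check with the assumed 24-hour default:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	from := time.Date(2019, time.March, 20, 10, 0, 0, 0, time.UTC)
	to := from.Add(2 * time.Hour)
	rotation := 24 * time.Hour // defaults.LogRotationPeriod is assumed to be one day

	fileDay := time.Date(2019, time.March, 20, 0, 0, 0, 0, time.UTC) // parsed from the filename
	match := fileDay.After(from.Add(-1*rotation)) && fileDay.Before(to.Add(rotation))
	fmt.Println(match) // true: a file opened earlier the same day is still scanned
}
```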
gravitational/teleport | lib/events/filelog.go | parseFileTime | func parseFileTime(filename string) (time.Time, error) {
base := strings.TrimSuffix(filename, filepath.Ext(filename))
return time.Parse(defaults.AuditLogTimeFormat, base)
} | go | func parseFileTime(filename string) (time.Time, error) {
base := strings.TrimSuffix(filename, filepath.Ext(filename))
return time.Parse(defaults.AuditLogTimeFormat, base)
} | [
"func",
"parseFileTime",
"(",
"filename",
"string",
")",
"(",
"time",
".",
"Time",
",",
"error",
")",
"{",
"base",
":=",
"strings",
".",
"TrimSuffix",
"(",
"filename",
",",
"filepath",
".",
"Ext",
"(",
"filename",
")",
")",
"\n",
"return",
"time",
".",
"Parse",
"(",
"defaults",
".",
"AuditLogTimeFormat",
",",
"base",
")",
"\n",
"}"
] | // parseFileTime parses file's timestamp encoded into filename | [
"parseFileTime",
"parses",
"file",
"s",
"timestamp",
"encoded",
"into",
"filename"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L401-L404 | train |
gravitational/teleport | lib/events/filelog.go | getTime | func getTime(v interface{}) time.Time {
sval, ok := v.(string)
if !ok {
return time.Time{}
}
t, err := time.Parse(time.RFC3339, sval)
if err != nil {
return time.Time{}
}
return t
} | go | func getTime(v interface{}) time.Time {
sval, ok := v.(string)
if !ok {
return time.Time{}
}
t, err := time.Parse(time.RFC3339, sval)
if err != nil {
return time.Time{}
}
return t
} | [
"func",
"getTime",
"(",
"v",
"interface",
"{",
"}",
")",
"time",
".",
"Time",
"{",
"sval",
",",
"ok",
":=",
"v",
".",
"(",
"string",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"time",
".",
"Time",
"{",
"}",
"\n",
"}",
"\n",
"t",
",",
"err",
":=",
"time",
".",
"Parse",
"(",
"time",
".",
"RFC3339",
",",
"sval",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"time",
".",
"Time",
"{",
"}",
"\n",
"}",
"\n",
"return",
"t",
"\n",
"}"
] | // getTime converts json time to string | [
"getTime",
"converts",
"json",
"time",
"to",
"string"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/filelog.go#L501-L511 | train |
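`getTime` is unexported, so it cannot be called from outside the package; the sketch below is a standalone copy of the behaviour shown above, demonstrating that non-string inputs and unparsable strings both collapse to the zero time.

```go
package main

import (
	"fmt"
	"time"
)

// standalone copy of the getTime behaviour shown in the row above
func getTime(v interface{}) time.Time {
	s, ok := v.(string)
	if !ok {
		return time.Time{}
	}
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return time.Time{}
	}
	return t
}

func main() {
	fmt.Println(getTime("2019-03-20T10:30:00Z")) // 2019-03-20 10:30:00 +0000 UTC
	fmt.Println(getTime(42).IsZero())            // true: non-strings collapse to the zero time
}
```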
gravitational/teleport | lib/utils/equals.go | StringMapsEqual | func StringMapsEqual(a, b map[string]string) bool {
if len(a) != len(b) {
return false
}
for key := range a {
if a[key] != b[key] {
return false
}
}
return true
} | go | func StringMapsEqual(a, b map[string]string) bool {
if len(a) != len(b) {
return false
}
for key := range a {
if a[key] != b[key] {
return false
}
}
return true
} | [
"func",
"StringMapsEqual",
"(",
"a",
",",
"b",
"map",
"[",
"string",
"]",
"string",
")",
"bool",
"{",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"for",
"key",
":=",
"range",
"a",
"{",
"if",
"a",
"[",
"key",
"]",
"!=",
"b",
"[",
"key",
"]",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] | // StringMapsEqual returns true if two strings maps are equal | [
"StringMapsEqual",
"returns",
"true",
"if",
"two",
"strings",
"maps",
"are",
"equal"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/utils/equals.go#L33-L43 | train |
gravitational/teleport | lib/utils/equals.go | StringMapSlicesEqual | func StringMapSlicesEqual(a, b map[string][]string) bool {
if len(a) != len(b) {
return false
}
for key := range a {
if !StringSlicesEqual(a[key], b[key]) {
return false
}
}
return true
} | go | func StringMapSlicesEqual(a, b map[string][]string) bool {
if len(a) != len(b) {
return false
}
for key := range a {
if !StringSlicesEqual(a[key], b[key]) {
return false
}
}
return true
} | [
"func",
"StringMapSlicesEqual",
"(",
"a",
",",
"b",
"map",
"[",
"string",
"]",
"[",
"]",
"string",
")",
"bool",
"{",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"for",
"key",
":=",
"range",
"a",
"{",
"if",
"!",
"StringSlicesEqual",
"(",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] | // StringMapSlicesEqual returns true if two maps of string slices are equal | [
"StringMapSlicesEqual",
"returns",
"true",
"if",
"two",
"maps",
"of",
"string",
"slices",
"are",
"equal"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/utils/equals.go#L59-L69 | train |
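A small usage sketch for the two equality helpers above. The `lib/utils` import path is an assumption, and the comment about ordering relies on `StringSlicesEqual` comparing element by element, which is inferred from its name rather than shown in this dump.

```go
package main

import (
	"fmt"

	"github.com/gravitational/teleport/lib/utils" // assumed import path for lib/utils
)

func main() {
	a := map[string]string{"env": "prod"}
	b := map[string]string{"env": "prod"}
	fmt.Println(utils.StringMapsEqual(a, b)) // true

	c := map[string][]string{"logins": {"root", "admin"}}
	d := map[string][]string{"logins": {"admin", "root"}}
	// assumption: StringSlicesEqual compares element by element, so order matters
	fmt.Println(utils.StringMapSlicesEqual(c, d)) // expected false under that assumption
}
```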
gravitational/teleport | lib/services/saml.go | NewSAMLConnector | func NewSAMLConnector(name string, spec SAMLConnectorSpecV2) SAMLConnector {
return &SAMLConnectorV2{
Kind: KindSAMLConnector,
Version: V2,
Metadata: Metadata{
Name: name,
Namespace: defaults.Namespace,
},
Spec: spec,
}
} | go | func NewSAMLConnector(name string, spec SAMLConnectorSpecV2) SAMLConnector {
return &SAMLConnectorV2{
Kind: KindSAMLConnector,
Version: V2,
Metadata: Metadata{
Name: name,
Namespace: defaults.Namespace,
},
Spec: spec,
}
} | [
"func",
"NewSAMLConnector",
"(",
"name",
"string",
",",
"spec",
"SAMLConnectorSpecV2",
")",
"SAMLConnector",
"{",
"return",
"&",
"SAMLConnectorV2",
"{",
"Kind",
":",
"KindSAMLConnector",
",",
"Version",
":",
"V2",
",",
"Metadata",
":",
"Metadata",
"{",
"Name",
":",
"name",
",",
"Namespace",
":",
"defaults",
".",
"Namespace",
",",
"}",
",",
"Spec",
":",
"spec",
",",
"}",
"\n",
"}"
] | // NewSAMLConnector returns a new SAMLConnector based off a name and SAMLConnectorSpecV2. | [
"NewSAMLConnector",
"returns",
"a",
"new",
"SAMLConnector",
"based",
"off",
"a",
"name",
"and",
"SAMLConnectorSpecV2",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L110-L120 | train |
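A minimal sketch of constructing a connector with `NewSAMLConnector`. The `lib/services` import path is an assumption, and the spec is left empty because the full field set of `SAMLConnectorSpecV2` is not visible in this dump; real connectors populate it with IdP metadata and attribute-to-role mappings.

```go
package main

import (
	"fmt"

	"github.com/gravitational/teleport/lib/services" // assumed import path for lib/services
)

func main() {
	conn := services.NewSAMLConnector("example-connector", services.SAMLConnectorSpecV2{})
	fmt.Println(conn.GetName()) // example-connector
}
```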
gravitational/teleport | lib/services/saml.go | UnmarshalSAMLConnector | func (*TeleportSAMLConnectorMarshaler) UnmarshalSAMLConnector(bytes []byte, opts ...MarshalOption) (SAMLConnector, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
var h ResourceHeader
err = utils.FastUnmarshal(bytes, &h)
if err != nil {
return nil, trace.Wrap(err)
}
switch h.Version {
case V2:
var c SAMLConnectorV2
if cfg.SkipValidation {
if err := utils.FastUnmarshal(bytes, &c); err != nil {
return nil, trace.BadParameter(err.Error())
}
} else {
if err := utils.UnmarshalWithSchema(GetSAMLConnectorSchema(), &c, bytes); err != nil {
return nil, trace.BadParameter(err.Error())
}
}
if err := c.Metadata.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
if cfg.ID != 0 {
c.SetResourceID(cfg.ID)
}
if !cfg.Expires.IsZero() {
c.SetExpiry(cfg.Expires)
}
return &c, nil
}
return nil, trace.BadParameter("SAML connector resource version %v is not supported", h.Version)
} | go | func (*TeleportSAMLConnectorMarshaler) UnmarshalSAMLConnector(bytes []byte, opts ...MarshalOption) (SAMLConnector, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
var h ResourceHeader
err = utils.FastUnmarshal(bytes, &h)
if err != nil {
return nil, trace.Wrap(err)
}
switch h.Version {
case V2:
var c SAMLConnectorV2
if cfg.SkipValidation {
if err := utils.FastUnmarshal(bytes, &c); err != nil {
return nil, trace.BadParameter(err.Error())
}
} else {
if err := utils.UnmarshalWithSchema(GetSAMLConnectorSchema(), &c, bytes); err != nil {
return nil, trace.BadParameter(err.Error())
}
}
if err := c.Metadata.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
if cfg.ID != 0 {
c.SetResourceID(cfg.ID)
}
if !cfg.Expires.IsZero() {
c.SetExpiry(cfg.Expires)
}
return &c, nil
}
return nil, trace.BadParameter("SAML connector resource version %v is not supported", h.Version)
} | [
"func",
"(",
"*",
"TeleportSAMLConnectorMarshaler",
")",
"UnmarshalSAMLConnector",
"(",
"bytes",
"[",
"]",
"byte",
",",
"opts",
"...",
"MarshalOption",
")",
"(",
"SAMLConnector",
",",
"error",
")",
"{",
"cfg",
",",
"err",
":=",
"collectOptions",
"(",
"opts",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"var",
"h",
"ResourceHeader",
"\n",
"err",
"=",
"utils",
".",
"FastUnmarshal",
"(",
"bytes",
",",
"&",
"h",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"switch",
"h",
".",
"Version",
"{",
"case",
"V2",
":",
"var",
"c",
"SAMLConnectorV2",
"\n",
"if",
"cfg",
".",
"SkipValidation",
"{",
"if",
"err",
":=",
"utils",
".",
"FastUnmarshal",
"(",
"bytes",
",",
"&",
"c",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"if",
"err",
":=",
"utils",
".",
"UnmarshalWithSchema",
"(",
"GetSAMLConnectorSchema",
"(",
")",
",",
"&",
"c",
",",
"bytes",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"c",
".",
"Metadata",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"cfg",
".",
"ID",
"!=",
"0",
"{",
"c",
".",
"SetResourceID",
"(",
"cfg",
".",
"ID",
")",
"\n",
"}",
"\n",
"if",
"!",
"cfg",
".",
"Expires",
".",
"IsZero",
"(",
")",
"{",
"c",
".",
"SetExpiry",
"(",
"cfg",
".",
"Expires",
")",
"\n",
"}",
"\n\n",
"return",
"&",
"c",
",",
"nil",
"\n",
"}",
"\n\n",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"h",
".",
"Version",
")",
"\n",
"}"
] | // UnmarshalSAMLConnector unmarshals connector from | [
"UnmarshalSAMLConnector",
"unmarshals",
"connector",
"from"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L155-L193 | train |
gravitational/teleport | lib/services/saml.go | MarshalSAMLConnector | func (*TeleportSAMLConnectorMarshaler) MarshalSAMLConnector(c SAMLConnector, opts ...MarshalOption) ([]byte, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
type connv2 interface {
V2() *SAMLConnectorV2
}
version := cfg.GetVersion()
switch version {
case V2:
v, ok := c.(connv2)
if !ok {
return nil, trace.BadParameter("don't know how to marshal %v", V2)
}
v2 := v.V2()
if !cfg.PreserveResourceID {
// avoid modifying the original object
// to prevent unexpected data races
copy := *v2
copy.SetResourceID(0)
v2 = ©
}
return utils.FastMarshal(v2)
default:
return nil, trace.BadParameter("version %v is not supported", version)
}
} | go | func (*TeleportSAMLConnectorMarshaler) MarshalSAMLConnector(c SAMLConnector, opts ...MarshalOption) ([]byte, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
type connv2 interface {
V2() *SAMLConnectorV2
}
version := cfg.GetVersion()
switch version {
case V2:
v, ok := c.(connv2)
if !ok {
return nil, trace.BadParameter("don't know how to marshal %v", V2)
}
v2 := v.V2()
if !cfg.PreserveResourceID {
// avoid modifying the original object
// to prevent unexpected data races
copy := *v2
copy.SetResourceID(0)
v2 = ©
}
return utils.FastMarshal(v2)
default:
return nil, trace.BadParameter("version %v is not supported", version)
}
} | [
"func",
"(",
"*",
"TeleportSAMLConnectorMarshaler",
")",
"MarshalSAMLConnector",
"(",
"c",
"SAMLConnector",
",",
"opts",
"...",
"MarshalOption",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"cfg",
",",
"err",
":=",
"collectOptions",
"(",
"opts",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"type",
"connv2",
"interface",
"{",
"V2",
"(",
")",
"*",
"SAMLConnectorV2",
"\n",
"}",
"\n",
"version",
":=",
"cfg",
".",
"GetVersion",
"(",
")",
"\n",
"switch",
"version",
"{",
"case",
"V2",
":",
"v",
",",
"ok",
":=",
"c",
".",
"(",
"connv2",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"V2",
")",
"\n",
"}",
"\n",
"v2",
":=",
"v",
".",
"V2",
"(",
")",
"\n",
"if",
"!",
"cfg",
".",
"PreserveResourceID",
"{",
"// avoid modifying the original object",
"// to prevent unexpected data races",
"copy",
":=",
"*",
"v2",
"\n",
"copy",
".",
"SetResourceID",
"(",
"0",
")",
"\n",
"v2",
"=",
"&",
"copy",
"\n",
"}",
"\n",
"return",
"utils",
".",
"FastMarshal",
"(",
"v2",
")",
"\n",
"default",
":",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"version",
")",
"\n",
"}",
"\n",
"}"
] | // MarshalSAMLConnector marshals SAML connector into JSON | [
"MarshalSAMLConnector",
"marshals",
"SAML",
"connector",
"into",
"JSON"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L196-L223 | train |
gravitational/teleport | lib/services/saml.go | Equals | func (o *SAMLConnectorV2) Equals(other SAMLConnector) bool {
if o.GetName() != other.GetName() {
return false
}
if o.GetCert() != other.GetCert() {
return false
}
if o.GetAudience() != other.GetAudience() {
return false
}
if o.GetEntityDescriptor() != other.GetEntityDescriptor() {
return false
}
if o.Expiry() != other.Expiry() {
return false
}
if o.GetIssuer() != other.GetIssuer() {
return false
}
if (o.GetSigningKeyPair() == nil && other.GetSigningKeyPair() != nil) || (o.GetSigningKeyPair() != nil && other.GetSigningKeyPair() == nil) {
return false
}
if o.GetSigningKeyPair() != nil {
a, b := o.GetSigningKeyPair(), other.GetSigningKeyPair()
if a.Cert != b.Cert || a.PrivateKey != b.PrivateKey {
return false
}
}
mappings := o.GetAttributesToRoles()
otherMappings := other.GetAttributesToRoles()
if len(mappings) != len(otherMappings) {
return false
}
for i := range mappings {
a, b := mappings[i], otherMappings[i]
if a.Name != b.Name || a.Value != b.Value || !utils.StringSlicesEqual(a.Roles, b.Roles) {
return false
}
if (a.RoleTemplate != nil && b.RoleTemplate == nil) || (a.RoleTemplate == nil && b.RoleTemplate != nil) {
return false
}
if a.RoleTemplate != nil && !a.RoleTemplate.Equals(b.RoleTemplate.V3()) {
return false
}
}
if o.GetSSO() != other.GetSSO() {
return false
}
return true
} | go | func (o *SAMLConnectorV2) Equals(other SAMLConnector) bool {
if o.GetName() != other.GetName() {
return false
}
if o.GetCert() != other.GetCert() {
return false
}
if o.GetAudience() != other.GetAudience() {
return false
}
if o.GetEntityDescriptor() != other.GetEntityDescriptor() {
return false
}
if o.Expiry() != other.Expiry() {
return false
}
if o.GetIssuer() != other.GetIssuer() {
return false
}
if (o.GetSigningKeyPair() == nil && other.GetSigningKeyPair() != nil) || (o.GetSigningKeyPair() != nil && other.GetSigningKeyPair() == nil) {
return false
}
if o.GetSigningKeyPair() != nil {
a, b := o.GetSigningKeyPair(), other.GetSigningKeyPair()
if a.Cert != b.Cert || a.PrivateKey != b.PrivateKey {
return false
}
}
mappings := o.GetAttributesToRoles()
otherMappings := other.GetAttributesToRoles()
if len(mappings) != len(otherMappings) {
return false
}
for i := range mappings {
a, b := mappings[i], otherMappings[i]
if a.Name != b.Name || a.Value != b.Value || !utils.StringSlicesEqual(a.Roles, b.Roles) {
return false
}
if (a.RoleTemplate != nil && b.RoleTemplate == nil) || (a.RoleTemplate == nil && b.RoleTemplate != nil) {
return false
}
if a.RoleTemplate != nil && !a.RoleTemplate.Equals(b.RoleTemplate.V3()) {
return false
}
}
if o.GetSSO() != other.GetSSO() {
return false
}
return true
} | [
"func",
"(",
"o",
"*",
"SAMLConnectorV2",
")",
"Equals",
"(",
"other",
"SAMLConnector",
")",
"bool",
"{",
"if",
"o",
".",
"GetName",
"(",
")",
"!=",
"other",
".",
"GetName",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetCert",
"(",
")",
"!=",
"other",
".",
"GetCert",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetAudience",
"(",
")",
"!=",
"other",
".",
"GetAudience",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetEntityDescriptor",
"(",
")",
"!=",
"other",
".",
"GetEntityDescriptor",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"Expiry",
"(",
")",
"!=",
"other",
".",
"Expiry",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetIssuer",
"(",
")",
"!=",
"other",
".",
"GetIssuer",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"(",
"o",
".",
"GetSigningKeyPair",
"(",
")",
"==",
"nil",
"&&",
"other",
".",
"GetSigningKeyPair",
"(",
")",
"!=",
"nil",
")",
"||",
"(",
"o",
".",
"GetSigningKeyPair",
"(",
")",
"!=",
"nil",
"&&",
"other",
".",
"GetSigningKeyPair",
"(",
")",
"==",
"nil",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetSigningKeyPair",
"(",
")",
"!=",
"nil",
"{",
"a",
",",
"b",
":=",
"o",
".",
"GetSigningKeyPair",
"(",
")",
",",
"other",
".",
"GetSigningKeyPair",
"(",
")",
"\n",
"if",
"a",
".",
"Cert",
"!=",
"b",
".",
"Cert",
"||",
"a",
".",
"PrivateKey",
"!=",
"b",
".",
"PrivateKey",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"mappings",
":=",
"o",
".",
"GetAttributesToRoles",
"(",
")",
"\n",
"otherMappings",
":=",
"other",
".",
"GetAttributesToRoles",
"(",
")",
"\n",
"if",
"len",
"(",
"mappings",
")",
"!=",
"len",
"(",
"otherMappings",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"for",
"i",
":=",
"range",
"mappings",
"{",
"a",
",",
"b",
":=",
"mappings",
"[",
"i",
"]",
",",
"otherMappings",
"[",
"i",
"]",
"\n",
"if",
"a",
".",
"Name",
"!=",
"b",
".",
"Name",
"||",
"a",
".",
"Value",
"!=",
"b",
".",
"Value",
"||",
"!",
"utils",
".",
"StringSlicesEqual",
"(",
"a",
".",
"Roles",
",",
"b",
".",
"Roles",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"(",
"a",
".",
"RoleTemplate",
"!=",
"nil",
"&&",
"b",
".",
"RoleTemplate",
"==",
"nil",
")",
"||",
"(",
"a",
".",
"RoleTemplate",
"==",
"nil",
"&&",
"b",
".",
"RoleTemplate",
"!=",
"nil",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"if",
"a",
".",
"RoleTemplate",
"!=",
"nil",
"&&",
"!",
"a",
".",
"RoleTemplate",
".",
"Equals",
"(",
"b",
".",
"RoleTemplate",
".",
"V3",
"(",
")",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"o",
".",
"GetSSO",
"(",
")",
"!=",
"other",
".",
"GetSSO",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] | // Equals returns true if the connectors are identical | [
"Equals",
"returns",
"true",
"if",
"the",
"connectors",
"are",
"identical"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L340-L389 | train |
gravitational/teleport | lib/services/saml.go | GetAttributes | func (o *SAMLConnectorV2) GetAttributes() []string {
var out []string
for _, mapping := range o.Spec.AttributesToRoles {
out = append(out, mapping.Name)
}
return utils.Deduplicate(out)
} | go | func (o *SAMLConnectorV2) GetAttributes() []string {
var out []string
for _, mapping := range o.Spec.AttributesToRoles {
out = append(out, mapping.Name)
}
return utils.Deduplicate(out)
} | [
"func",
"(",
"o",
"*",
"SAMLConnectorV2",
")",
"GetAttributes",
"(",
")",
"[",
"]",
"string",
"{",
"var",
"out",
"[",
"]",
"string",
"\n",
"for",
"_",
",",
"mapping",
":=",
"range",
"o",
".",
"Spec",
".",
"AttributesToRoles",
"{",
"out",
"=",
"append",
"(",
"out",
",",
"mapping",
".",
"Name",
")",
"\n",
"}",
"\n",
"return",
"utils",
".",
"Deduplicate",
"(",
"out",
")",
"\n",
"}"
] | // GetAttributes returns list of attributes expected by mappings | [
"GetAttributes",
"returns",
"list",
"of",
"attributes",
"expected",
"by",
"mappings"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L470-L476 | train |
gravitational/teleport | lib/services/saml.go | MapAttributes | func (o *SAMLConnectorV2) MapAttributes(assertionInfo saml2.AssertionInfo) []string {
var roles []string
for _, mapping := range o.Spec.AttributesToRoles {
for _, attr := range assertionInfo.Values {
if attr.Name != mapping.Name {
continue
}
mappingLoop:
for _, value := range attr.Values {
for _, role := range mapping.Roles {
outRole, err := utils.ReplaceRegexp(mapping.Value, role, value.Value)
switch {
case err != nil:
if !trace.IsNotFound(err) {
log.Debugf("Failed to match expression %v, replace with: %v input: %v, err: %v", mapping.Value, role, value.Value, err)
}
// if value input did not match, no need to apply
// to all roles
continue mappingLoop
case outRole == "":
// skip empty role matches
case outRole != "":
roles = append(roles, outRole)
}
}
}
}
}
return utils.Deduplicate(roles)
} | go | func (o *SAMLConnectorV2) MapAttributes(assertionInfo saml2.AssertionInfo) []string {
var roles []string
for _, mapping := range o.Spec.AttributesToRoles {
for _, attr := range assertionInfo.Values {
if attr.Name != mapping.Name {
continue
}
mappingLoop:
for _, value := range attr.Values {
for _, role := range mapping.Roles {
outRole, err := utils.ReplaceRegexp(mapping.Value, role, value.Value)
switch {
case err != nil:
if !trace.IsNotFound(err) {
log.Debugf("Failed to match expression %v, replace with: %v input: %v, err: %v", mapping.Value, role, value.Value, err)
}
// if value input did not match, no need to apply
// to all roles
continue mappingLoop
case outRole == "":
// skip empty role matches
case outRole != "":
roles = append(roles, outRole)
}
}
}
}
}
return utils.Deduplicate(roles)
} | [
"func",
"(",
"o",
"*",
"SAMLConnectorV2",
")",
"MapAttributes",
"(",
"assertionInfo",
"saml2",
".",
"AssertionInfo",
")",
"[",
"]",
"string",
"{",
"var",
"roles",
"[",
"]",
"string",
"\n",
"for",
"_",
",",
"mapping",
":=",
"range",
"o",
".",
"Spec",
".",
"AttributesToRoles",
"{",
"for",
"_",
",",
"attr",
":=",
"range",
"assertionInfo",
".",
"Values",
"{",
"if",
"attr",
".",
"Name",
"!=",
"mapping",
".",
"Name",
"{",
"continue",
"\n",
"}",
"\n",
"mappingLoop",
":",
"for",
"_",
",",
"value",
":=",
"range",
"attr",
".",
"Values",
"{",
"for",
"_",
",",
"role",
":=",
"range",
"mapping",
".",
"Roles",
"{",
"outRole",
",",
"err",
":=",
"utils",
".",
"ReplaceRegexp",
"(",
"mapping",
".",
"Value",
",",
"role",
",",
"value",
".",
"Value",
")",
"\n",
"switch",
"{",
"case",
"err",
"!=",
"nil",
":",
"if",
"!",
"trace",
".",
"IsNotFound",
"(",
"err",
")",
"{",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"mapping",
".",
"Value",
",",
"role",
",",
"value",
".",
"Value",
",",
"err",
")",
"\n",
"}",
"\n",
"// if value input did not match, no need to apply",
"// to all roles",
"continue",
"mappingLoop",
"\n",
"case",
"outRole",
"==",
"\"",
"\"",
":",
"// skip empty role matches",
"case",
"outRole",
"!=",
"\"",
"\"",
":",
"roles",
"=",
"append",
"(",
"roles",
",",
"outRole",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"utils",
".",
"Deduplicate",
"(",
"roles",
")",
"\n",
"}"
] | // MapAttributes maps SAML attributes to roles | [
"MapAttributes",
"maps",
"SAML",
"attributes",
"to",
"roles"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L479-L508 | train |
gravitational/teleport | lib/services/saml.go | GetAttributeNames | func GetAttributeNames(attributes map[string]types.Attribute) []string {
var out []string
for _, attr := range attributes {
out = append(out, attr.Name)
}
return out
} | go | func GetAttributeNames(attributes map[string]types.Attribute) []string {
var out []string
for _, attr := range attributes {
out = append(out, attr.Name)
}
return out
} | [
"func",
"GetAttributeNames",
"(",
"attributes",
"map",
"[",
"string",
"]",
"types",
".",
"Attribute",
")",
"[",
"]",
"string",
"{",
"var",
"out",
"[",
"]",
"string",
"\n",
"for",
"_",
",",
"attr",
":=",
"range",
"attributes",
"{",
"out",
"=",
"append",
"(",
"out",
",",
"attr",
".",
"Name",
")",
"\n",
"}",
"\n",
"return",
"out",
"\n",
"}"
] | // GetAttributeNames returns a list of claim names from the claim values | [
"GetAttributeNames",
"returns",
"a",
"list",
"of",
"claim",
"names",
"from",
"the",
"claim",
"values"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/saml.go#L792-L798 | train |
gravitational/teleport | lib/services/presence.go | NewNamespace | func NewNamespace(name string) Namespace {
return Namespace{
Kind: KindNamespace,
Version: V2,
Metadata: Metadata{
Name: name,
},
}
} | go | func NewNamespace(name string) Namespace {
return Namespace{
Kind: KindNamespace,
Version: V2,
Metadata: Metadata{
Name: name,
},
}
} | [
"func",
"NewNamespace",
"(",
"name",
"string",
")",
"Namespace",
"{",
"return",
"Namespace",
"{",
"Kind",
":",
"KindNamespace",
",",
"Version",
":",
"V2",
",",
"Metadata",
":",
"Metadata",
"{",
"Name",
":",
"name",
",",
"}",
",",
"}",
"\n",
"}"
] | // NewNamespace returns new namespace | [
"NewNamespace",
"returns",
"new",
"namespace"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/presence.go#L159-L167 | train |
gravitational/teleport | lib/services/resource.go | AddOptions | func AddOptions(opts []MarshalOption, add ...MarshalOption) []MarshalOption {
out := make([]MarshalOption, len(opts), len(opts)+len(add))
copy(out, opts)
return append(opts, add...)
} | go | func AddOptions(opts []MarshalOption, add ...MarshalOption) []MarshalOption {
out := make([]MarshalOption, len(opts), len(opts)+len(add))
copy(out, opts)
return append(opts, add...)
} | [
"func",
"AddOptions",
"(",
"opts",
"[",
"]",
"MarshalOption",
",",
"add",
"...",
"MarshalOption",
")",
"[",
"]",
"MarshalOption",
"{",
"out",
":=",
"make",
"(",
"[",
"]",
"MarshalOption",
",",
"len",
"(",
"opts",
")",
",",
"len",
"(",
"opts",
")",
"+",
"len",
"(",
"add",
")",
")",
"\n",
"copy",
"(",
"out",
",",
"opts",
")",
"\n",
"return",
"append",
"(",
"opts",
",",
"add",
"...",
")",
"\n",
"}"
] | // AddOptions adds marshal options and returns a new copy | [
"AddOptions",
"adds",
"marshal",
"options",
"and",
"returns",
"a",
"new",
"copy"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L254-L258 | train |
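Note that the body above allocates `out` and copies `opts` into it, but then appends to, and returns, `opts` rather than `out`; the pre-allocated copy is unused and the caller's slice can be mutated if it has spare capacity. A corrected sketch of the apparent intent, written as a drop-in inside the same package (so `MarshalOption` is in scope):

```go
// AddOptions as apparently intended: append into the fresh copy so the
// caller's slice is never modified.
func AddOptions(opts []MarshalOption, add ...MarshalOption) []MarshalOption {
	out := make([]MarshalOption, len(opts), len(opts)+len(add))
	copy(out, opts)
	return append(out, add...)
}
```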
gravitational/teleport | lib/services/resource.go | WithResourceID | func WithResourceID(id int64) MarshalOption {
return func(c *MarshalConfig) error {
c.ID = id
return nil
}
} | go | func WithResourceID(id int64) MarshalOption {
return func(c *MarshalConfig) error {
c.ID = id
return nil
}
} | [
"func",
"WithResourceID",
"(",
"id",
"int64",
")",
"MarshalOption",
"{",
"return",
"func",
"(",
"c",
"*",
"MarshalConfig",
")",
"error",
"{",
"c",
".",
"ID",
"=",
"id",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // WithResourceID assigns ID to the resource | [
"WithResourceID",
"assigns",
"ID",
"to",
"the",
"resource"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L261-L266 | train |
gravitational/teleport | lib/services/resource.go | WithExpires | func WithExpires(expires time.Time) MarshalOption {
return func(c *MarshalConfig) error {
c.Expires = expires
return nil
}
} | go | func WithExpires(expires time.Time) MarshalOption {
return func(c *MarshalConfig) error {
c.Expires = expires
return nil
}
} | [
"func",
"WithExpires",
"(",
"expires",
"time",
".",
"Time",
")",
"MarshalOption",
"{",
"return",
"func",
"(",
"c",
"*",
"MarshalConfig",
")",
"error",
"{",
"c",
".",
"Expires",
"=",
"expires",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // WithExpires assigns expiry value | [
"WithExpires",
"assigns",
"expiry",
"value"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L269-L274 | train |
gravitational/teleport | lib/services/resource.go | WithVersion | func WithVersion(v string) MarshalOption {
return func(c *MarshalConfig) error {
switch v {
case V1, V2:
c.Version = v
return nil
default:
return trace.BadParameter("version '%v' is not supported", v)
}
}
} | go | func WithVersion(v string) MarshalOption {
return func(c *MarshalConfig) error {
switch v {
case V1, V2:
c.Version = v
return nil
default:
return trace.BadParameter("version '%v' is not supported", v)
}
}
} | [
"func",
"WithVersion",
"(",
"v",
"string",
")",
"MarshalOption",
"{",
"return",
"func",
"(",
"c",
"*",
"MarshalConfig",
")",
"error",
"{",
"switch",
"v",
"{",
"case",
"V1",
",",
"V2",
":",
"c",
".",
"Version",
"=",
"v",
"\n",
"return",
"nil",
"\n",
"default",
":",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"v",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // WithVersion sets marshal version | [
"WithVersion",
"sets",
"marshal",
"version"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L280-L290 | train |
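Since `MarshalOption` is a `func(*MarshalConfig) error`, the option constructors above can be applied by hand, which is presumably what the library's `collectOptions` helper does internally. A hedged sketch (the `lib/services` import path is assumed; `MarshalConfig`, `WithResourceID`, `WithExpires`, `WithVersion`, and `V2` all appear in the rows above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/gravitational/teleport/lib/services" // assumed import path for lib/services
)

func main() {
	var cfg services.MarshalConfig
	opts := []services.MarshalOption{
		services.WithResourceID(7),
		services.WithExpires(time.Now().Add(time.Hour)),
		services.WithVersion(services.V2),
	}
	for _, opt := range opts {
		if err := opt(&cfg); err != nil {
			fmt.Println("bad option:", err)
			return
		}
	}
	fmt.Println(cfg.ID, cfg.Version) // 7 and the V2 version string
}
```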
gravitational/teleport | lib/services/resource.go | SetTTL | func (h *ResourceHeader) SetTTL(clock clockwork.Clock, ttl time.Duration) {
h.Metadata.SetTTL(clock, ttl)
} | go | func (h *ResourceHeader) SetTTL(clock clockwork.Clock, ttl time.Duration) {
h.Metadata.SetTTL(clock, ttl)
} | [
"func",
"(",
"h",
"*",
"ResourceHeader",
")",
"SetTTL",
"(",
"clock",
"clockwork",
".",
"Clock",
",",
"ttl",
"time",
".",
"Duration",
")",
"{",
"h",
".",
"Metadata",
".",
"SetTTL",
"(",
"clock",
",",
"ttl",
")",
"\n",
"}"
] | // SetTTL sets Expires header using current clock | [
"SetTTL",
"sets",
"Expires",
"header",
"using",
"current",
"clock"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L394-L396 | train |
gravitational/teleport | lib/services/resource.go | UnmarshalJSON | func (u *UnknownResource) UnmarshalJSON(raw []byte) error {
var h ResourceHeader
if err := json.Unmarshal(raw, &h); err != nil {
return trace.Wrap(err)
}
u.Raw = make([]byte, len(raw))
u.ResourceHeader = h
copy(u.Raw, raw)
return nil
} | go | func (u *UnknownResource) UnmarshalJSON(raw []byte) error {
var h ResourceHeader
if err := json.Unmarshal(raw, &h); err != nil {
return trace.Wrap(err)
}
u.Raw = make([]byte, len(raw))
u.ResourceHeader = h
copy(u.Raw, raw)
return nil
} | [
"func",
"(",
"u",
"*",
"UnknownResource",
")",
"UnmarshalJSON",
"(",
"raw",
"[",
"]",
"byte",
")",
"error",
"{",
"var",
"h",
"ResourceHeader",
"\n",
"if",
"err",
":=",
"json",
".",
"Unmarshal",
"(",
"raw",
",",
"&",
"h",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"u",
".",
"Raw",
"=",
"make",
"(",
"[",
"]",
"byte",
",",
"len",
"(",
"raw",
")",
")",
"\n",
"u",
".",
"ResourceHeader",
"=",
"h",
"\n",
"copy",
"(",
"u",
".",
"Raw",
",",
"raw",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // UnmarshalJSON unmarshals header and captures raw state | [
"UnmarshalJSON",
"unmarshals",
"header",
"and",
"captures",
"raw",
"state"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L419-L428 | train |
gravitational/teleport | lib/services/resource.go | CheckAndSetDefaults | func (m *Metadata) CheckAndSetDefaults() error {
if m.Name == "" {
return trace.BadParameter("missing parameter Name")
}
if m.Namespace == "" {
m.Namespace = defaults.Namespace
}
// adjust expires time to utc if it's set
if m.Expires != nil {
utils.UTC(m.Expires)
}
return nil
} | go | func (m *Metadata) CheckAndSetDefaults() error {
if m.Name == "" {
return trace.BadParameter("missing parameter Name")
}
if m.Namespace == "" {
m.Namespace = defaults.Namespace
}
// adjust expires time to utc if it's set
if m.Expires != nil {
utils.UTC(m.Expires)
}
return nil
} | [
"func",
"(",
"m",
"*",
"Metadata",
")",
"CheckAndSetDefaults",
"(",
")",
"error",
"{",
"if",
"m",
".",
"Name",
"==",
"\"",
"\"",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"m",
".",
"Namespace",
"==",
"\"",
"\"",
"{",
"m",
".",
"Namespace",
"=",
"defaults",
".",
"Namespace",
"\n",
"}",
"\n\n",
"// adjust expires time to utc if it's set",
"if",
"m",
".",
"Expires",
"!=",
"nil",
"{",
"utils",
".",
"UTC",
"(",
"m",
".",
"Expires",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // CheckAndSetDefaults checks validity of all parameters and sets defaults | [
"CheckAndSetDefaults",
"checks",
"validity",
"of",
"all",
"parameters",
"and",
"sets",
"defaults"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L503-L517 | train |
gravitational/teleport | lib/services/resource.go | ParseShortcut | func ParseShortcut(in string) (string, error) {
if in == "" {
return "", trace.BadParameter("missing resource name")
}
switch strings.ToLower(in) {
case "role", "roles":
return KindRole, nil
case "namespaces", "ns":
return KindNamespace, nil
case "auth_servers", "auth":
return KindAuthServer, nil
case "proxies":
return KindProxy, nil
case "nodes", "node":
return KindNode, nil
case "oidc":
return KindOIDCConnector, nil
case "saml":
return KindSAMLConnector, nil
case "github":
return KindGithubConnector, nil
case "connectors", "connector":
return KindConnectors, nil
case "user", "users":
return KindUser, nil
case "cert_authorities", "cas":
return KindCertAuthority, nil
case "reverse_tunnels", "rts":
return KindReverseTunnel, nil
case "trusted_cluster", "tc", "cluster", "clusters":
return KindTrustedCluster, nil
case "cluster_authentication_preferences", "cap":
return KindClusterAuthPreference, nil
case "remote_cluster", "remote_clusters", "rc", "rcs":
return KindRemoteCluster, nil
}
return "", trace.BadParameter("unsupported resource: %v", in)
} | go | func ParseShortcut(in string) (string, error) {
if in == "" {
return "", trace.BadParameter("missing resource name")
}
switch strings.ToLower(in) {
case "role", "roles":
return KindRole, nil
case "namespaces", "ns":
return KindNamespace, nil
case "auth_servers", "auth":
return KindAuthServer, nil
case "proxies":
return KindProxy, nil
case "nodes", "node":
return KindNode, nil
case "oidc":
return KindOIDCConnector, nil
case "saml":
return KindSAMLConnector, nil
case "github":
return KindGithubConnector, nil
case "connectors", "connector":
return KindConnectors, nil
case "user", "users":
return KindUser, nil
case "cert_authorities", "cas":
return KindCertAuthority, nil
case "reverse_tunnels", "rts":
return KindReverseTunnel, nil
case "trusted_cluster", "tc", "cluster", "clusters":
return KindTrustedCluster, nil
case "cluster_authentication_preferences", "cap":
return KindClusterAuthPreference, nil
case "remote_cluster", "remote_clusters", "rc", "rcs":
return KindRemoteCluster, nil
}
return "", trace.BadParameter("unsupported resource: %v", in)
} | [
"func",
"ParseShortcut",
"(",
"in",
"string",
")",
"(",
"string",
",",
"error",
")",
"{",
"if",
"in",
"==",
"\"",
"\"",
"{",
"return",
"\"",
"\"",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"switch",
"strings",
".",
"ToLower",
"(",
"in",
")",
"{",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindRole",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindNamespace",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindAuthServer",
",",
"nil",
"\n",
"case",
"\"",
"\"",
":",
"return",
"KindProxy",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindNode",
",",
"nil",
"\n",
"case",
"\"",
"\"",
":",
"return",
"KindOIDCConnector",
",",
"nil",
"\n",
"case",
"\"",
"\"",
":",
"return",
"KindSAMLConnector",
",",
"nil",
"\n",
"case",
"\"",
"\"",
":",
"return",
"KindGithubConnector",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindConnectors",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindUser",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindCertAuthority",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindReverseTunnel",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindTrustedCluster",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindClusterAuthPreference",
",",
"nil",
"\n",
"case",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
",",
"\"",
"\"",
":",
"return",
"KindRemoteCluster",
",",
"nil",
"\n",
"}",
"\n",
"return",
"\"",
"\"",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"in",
")",
"\n",
"}"
] | // ParseShortcut parses resource shortcut | [
"ParseShortcut",
"parses",
"resource",
"shortcut"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/resource.go#L520-L557 | train |
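The ParseShortcut record above resolves user-facing aliases to canonical resource kinds through a lower-cased switch. A minimal standalone sketch of the same lookup technique is below; the map contents and kind strings are illustrative stand-ins, not Teleport's real Kind* constants.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// shortcuts maps lower-cased aliases to a canonical kind name.
// The kinds here are hypothetical examples, not Teleport's constants.
var shortcuts = map[string]string{
	"role": "role", "roles": "role",
	"node": "node", "nodes": "node",
	"user": "user", "users": "user",
}

// parseShortcut resolves an alias to its canonical kind, mirroring
// the switch-based lookup in the record above.
func parseShortcut(in string) (string, error) {
	if in == "" {
		return "", errors.New("missing resource name")
	}
	if kind, ok := shortcuts[strings.ToLower(in)]; ok {
		return kind, nil
	}
	return "", fmt.Errorf("unsupported resource: %v", in)
}

func main() {
	kind, err := parseShortcut("Nodes")
	fmt.Println(kind, err) // node <nil>
}
```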
gravitational/teleport | lib/utils/parse/parse.go | walk | func walk(node ast.Node) ([]string, error) {
var l []string
switch n := node.(type) {
case *ast.IndexExpr:
ret, err := walk(n.X)
if err != nil {
return nil, err
}
l = append(l, ret...)
ret, err = walk(n.Index)
if err != nil {
return nil, err
}
l = append(l, ret...)
case *ast.SelectorExpr:
ret, err := walk(n.X)
if err != nil {
return nil, err
}
l = append(l, ret...)
ret, err = walk(n.Sel)
if err != nil {
return nil, err
}
l = append(l, ret...)
case *ast.Ident:
return []string{n.Name}, nil
case *ast.BasicLit:
value, err := strconv.Unquote(n.Value)
if err != nil {
return nil, err
}
return []string{value}, nil
default:
return nil, trace.BadParameter("unknown node type: %T", n)
}
return l, nil
} | go | func walk(node ast.Node) ([]string, error) {
var l []string
switch n := node.(type) {
case *ast.IndexExpr:
ret, err := walk(n.X)
if err != nil {
return nil, err
}
l = append(l, ret...)
ret, err = walk(n.Index)
if err != nil {
return nil, err
}
l = append(l, ret...)
case *ast.SelectorExpr:
ret, err := walk(n.X)
if err != nil {
return nil, err
}
l = append(l, ret...)
ret, err = walk(n.Sel)
if err != nil {
return nil, err
}
l = append(l, ret...)
case *ast.Ident:
return []string{n.Name}, nil
case *ast.BasicLit:
value, err := strconv.Unquote(n.Value)
if err != nil {
return nil, err
}
return []string{value}, nil
default:
return nil, trace.BadParameter("unknown node type: %T", n)
}
return l, nil
} | [
"func",
"walk",
"(",
"node",
"ast",
".",
"Node",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"var",
"l",
"[",
"]",
"string",
"\n\n",
"switch",
"n",
":=",
"node",
".",
"(",
"type",
")",
"{",
"case",
"*",
"ast",
".",
"IndexExpr",
":",
"ret",
",",
"err",
":=",
"walk",
"(",
"n",
".",
"X",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"l",
"=",
"append",
"(",
"l",
",",
"ret",
"...",
")",
"\n\n",
"ret",
",",
"err",
"=",
"walk",
"(",
"n",
".",
"Index",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"l",
"=",
"append",
"(",
"l",
",",
"ret",
"...",
")",
"\n",
"case",
"*",
"ast",
".",
"SelectorExpr",
":",
"ret",
",",
"err",
":=",
"walk",
"(",
"n",
".",
"X",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"l",
"=",
"append",
"(",
"l",
",",
"ret",
"...",
")",
"\n\n",
"ret",
",",
"err",
"=",
"walk",
"(",
"n",
".",
"Sel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"l",
"=",
"append",
"(",
"l",
",",
"ret",
"...",
")",
"\n",
"case",
"*",
"ast",
".",
"Ident",
":",
"return",
"[",
"]",
"string",
"{",
"n",
".",
"Name",
"}",
",",
"nil",
"\n",
"case",
"*",
"ast",
".",
"BasicLit",
":",
"value",
",",
"err",
":=",
"strconv",
".",
"Unquote",
"(",
"n",
".",
"Value",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"[",
"]",
"string",
"{",
"value",
"}",
",",
"nil",
"\n",
"default",
":",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"n",
")",
"\n",
"}",
"\n\n",
"return",
"l",
",",
"nil",
"\n",
"}"
] | // walk will walk the ast tree and gather all the variable parts into a slice and return it. | [
"walk",
"will",
"walk",
"the",
"ast",
"tree",
"and",
"gather",
"all",
"the",
"variable",
"parts",
"into",
"a",
"slice",
"and",
"return",
"it",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/utils/parse/parse.go#L64-L105 | train |
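The walk record above flattens selector and index expressions into their string parts. The following self-contained sketch re-implements that traversal over an expression parsed with the standard library's go/parser; it is an illustrative re-implementation of the same technique, not the unexported Teleport function itself.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"strconv"
)

// walk flattens selector/index expressions such as
// internal.traits["logins"] into their string parts.
func walk(node ast.Node) ([]string, error) {
	switch n := node.(type) {
	case *ast.IndexExpr:
		left, err := walk(n.X)
		if err != nil {
			return nil, err
		}
		right, err := walk(n.Index)
		if err != nil {
			return nil, err
		}
		return append(left, right...), nil
	case *ast.SelectorExpr:
		left, err := walk(n.X)
		if err != nil {
			return nil, err
		}
		right, err := walk(n.Sel)
		if err != nil {
			return nil, err
		}
		return append(left, right...), nil
	case *ast.Ident:
		return []string{n.Name}, nil
	case *ast.BasicLit:
		value, err := strconv.Unquote(n.Value)
		if err != nil {
			return nil, err
		}
		return []string{value}, nil
	default:
		return nil, fmt.Errorf("unknown node type: %T", n)
	}
}

func main() {
	expr, err := parser.ParseExpr(`internal.traits["logins"]`)
	if err != nil {
		panic(err)
	}
	parts, err := walk(expr)
	fmt.Println(parts, err) // [internal traits logins] <nil>
}
```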
gravitational/teleport | lib/events/auditlog.go | NewAuditLog | func NewAuditLog(cfg AuditLogConfig) (*AuditLog, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
ctx, cancel := context.WithCancel(cfg.Context)
al := &AuditLog{
playbackDir: filepath.Join(cfg.DataDir, PlaybackDir, SessionLogsDir, defaults.Namespace),
AuditLogConfig: cfg,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentAuditLog,
}),
activeDownloads: make(map[string]context.Context),
ctx: ctx,
cancel: cancel,
}
// create a directory for audit logs, audit log does not create
// session logs before migrations are run in case if the directory
// has to be moved
auditDir := filepath.Join(cfg.DataDir, cfg.ServerID)
if err := os.MkdirAll(auditDir, *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
// create a directory for session logs:
sessionDir := filepath.Join(cfg.DataDir, cfg.ServerID, SessionLogsDir, defaults.Namespace)
if err := os.MkdirAll(sessionDir, *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
// create a directory for uncompressed playbacks
if err := os.MkdirAll(filepath.Join(al.playbackDir), *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
if cfg.UID != nil && cfg.GID != nil {
err := os.Chown(cfg.DataDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = os.Chown(sessionDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = os.Chown(al.playbackDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
}
if al.ExternalLog == nil {
var err error
al.localLog, err = NewFileLog(FileLogConfig{
RotationPeriod: al.RotationPeriod,
Dir: auditDir,
SymlinkDir: cfg.DataDir,
Clock: al.Clock,
UIDGenerator: al.UIDGenerator,
SearchDirs: al.auditDirs,
})
if err != nil {
return nil, trace.Wrap(err)
}
}
go al.periodicCleanupPlaybacks()
go al.periodicSpaceMonitor()
return al, nil
} | go | func NewAuditLog(cfg AuditLogConfig) (*AuditLog, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
ctx, cancel := context.WithCancel(cfg.Context)
al := &AuditLog{
playbackDir: filepath.Join(cfg.DataDir, PlaybackDir, SessionLogsDir, defaults.Namespace),
AuditLogConfig: cfg,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentAuditLog,
}),
activeDownloads: make(map[string]context.Context),
ctx: ctx,
cancel: cancel,
}
// create a directory for audit logs, audit log does not create
// session logs before migrations are run in case if the directory
// has to be moved
auditDir := filepath.Join(cfg.DataDir, cfg.ServerID)
if err := os.MkdirAll(auditDir, *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
// create a directory for session logs:
sessionDir := filepath.Join(cfg.DataDir, cfg.ServerID, SessionLogsDir, defaults.Namespace)
if err := os.MkdirAll(sessionDir, *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
// create a directory for uncompressed playbacks
if err := os.MkdirAll(filepath.Join(al.playbackDir), *cfg.DirMask); err != nil {
return nil, trace.ConvertSystemError(err)
}
if cfg.UID != nil && cfg.GID != nil {
err := os.Chown(cfg.DataDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = os.Chown(sessionDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = os.Chown(al.playbackDir, *cfg.UID, *cfg.GID)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
}
if al.ExternalLog == nil {
var err error
al.localLog, err = NewFileLog(FileLogConfig{
RotationPeriod: al.RotationPeriod,
Dir: auditDir,
SymlinkDir: cfg.DataDir,
Clock: al.Clock,
UIDGenerator: al.UIDGenerator,
SearchDirs: al.auditDirs,
})
if err != nil {
return nil, trace.Wrap(err)
}
}
go al.periodicCleanupPlaybacks()
go al.periodicSpaceMonitor()
return al, nil
} | [
"func",
"NewAuditLog",
"(",
"cfg",
"AuditLogConfig",
")",
"(",
"*",
"AuditLog",
",",
"error",
")",
"{",
"if",
"err",
":=",
"cfg",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"cfg",
".",
"Context",
")",
"\n",
"al",
":=",
"&",
"AuditLog",
"{",
"playbackDir",
":",
"filepath",
".",
"Join",
"(",
"cfg",
".",
"DataDir",
",",
"PlaybackDir",
",",
"SessionLogsDir",
",",
"defaults",
".",
"Namespace",
")",
",",
"AuditLogConfig",
":",
"cfg",
",",
"Entry",
":",
"log",
".",
"WithFields",
"(",
"log",
".",
"Fields",
"{",
"trace",
".",
"Component",
":",
"teleport",
".",
"ComponentAuditLog",
",",
"}",
")",
",",
"activeDownloads",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"context",
".",
"Context",
")",
",",
"ctx",
":",
"ctx",
",",
"cancel",
":",
"cancel",
",",
"}",
"\n",
"// create a directory for audit logs, audit log does not create",
"// session logs before migrations are run in case if the directory",
"// has to be moved",
"auditDir",
":=",
"filepath",
".",
"Join",
"(",
"cfg",
".",
"DataDir",
",",
"cfg",
".",
"ServerID",
")",
"\n",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"auditDir",
",",
"*",
"cfg",
".",
"DirMask",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"// create a directory for session logs:",
"sessionDir",
":=",
"filepath",
".",
"Join",
"(",
"cfg",
".",
"DataDir",
",",
"cfg",
".",
"ServerID",
",",
"SessionLogsDir",
",",
"defaults",
".",
"Namespace",
")",
"\n",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"sessionDir",
",",
"*",
"cfg",
".",
"DirMask",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"// create a directory for uncompressed playbacks",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"filepath",
".",
"Join",
"(",
"al",
".",
"playbackDir",
")",
",",
"*",
"cfg",
".",
"DirMask",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"cfg",
".",
"UID",
"!=",
"nil",
"&&",
"cfg",
".",
"GID",
"!=",
"nil",
"{",
"err",
":=",
"os",
".",
"Chown",
"(",
"cfg",
".",
"DataDir",
",",
"*",
"cfg",
".",
"UID",
",",
"*",
"cfg",
".",
"GID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"err",
"=",
"os",
".",
"Chown",
"(",
"sessionDir",
",",
"*",
"cfg",
".",
"UID",
",",
"*",
"cfg",
".",
"GID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"err",
"=",
"os",
".",
"Chown",
"(",
"al",
".",
"playbackDir",
",",
"*",
"cfg",
".",
"UID",
",",
"*",
"cfg",
".",
"GID",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"if",
"al",
".",
"ExternalLog",
"==",
"nil",
"{",
"var",
"err",
"error",
"\n",
"al",
".",
"localLog",
",",
"err",
"=",
"NewFileLog",
"(",
"FileLogConfig",
"{",
"RotationPeriod",
":",
"al",
".",
"RotationPeriod",
",",
"Dir",
":",
"auditDir",
",",
"SymlinkDir",
":",
"cfg",
".",
"DataDir",
",",
"Clock",
":",
"al",
".",
"Clock",
",",
"UIDGenerator",
":",
"al",
".",
"UIDGenerator",
",",
"SearchDirs",
":",
"al",
".",
"auditDirs",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"go",
"al",
".",
"periodicCleanupPlaybacks",
"(",
")",
"\n",
"go",
"al",
".",
"periodicSpaceMonitor",
"(",
")",
"\n\n",
"return",
"al",
",",
"nil",
"\n",
"}"
] | // Creates and returns a new Audit Log object which will store its logfiles in
// a given directory. Session recording can be disabled by setting
// recordSessions to false. | [
"Creates",
"and",
"returns",
"a",
"new",
"Audit",
"Log",
"object",
"whish",
"will",
"store",
"its",
"logfiles",
"in",
"a",
"given",
"directory",
".",
"Session",
"recording",
"can",
"be",
"disabled",
"by",
"setting",
"recordSessions",
"to",
"false",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L235-L300 | train |
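The constructor record above repeatedly applies the same MkdirAll-then-optional-Chown sequence to its audit, session, and playback directories. A small sketch of that pattern, with illustrative paths and mode, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensureDir creates dir with the given mode and, when uid/gid are both
// provided, changes ownership -- the MkdirAll/Chown sequence used above.
func ensureDir(dir string, mode os.FileMode, uid, gid *int) error {
	if err := os.MkdirAll(dir, mode); err != nil {
		return err
	}
	if uid != nil && gid != nil {
		if err := os.Chown(dir, *uid, *gid); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	dataDir := filepath.Join(os.TempDir(), "audit-example")
	// nil uid/gid keeps current ownership; pass pointers to override.
	if err := ensureDir(filepath.Join(dataDir, "sessions"), 0755, nil, nil); err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	fmt.Println("created", dataDir)
}
```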
gravitational/teleport | lib/events/auditlog.go | CheckAndSetDefaults | func (l *SessionRecording) CheckAndSetDefaults() error {
if l.Recording == nil {
return trace.BadParameter("missing parameter Recording")
}
if l.SessionID.IsZero() {
return trace.BadParameter("missing parameter session ID")
}
if l.Namespace == "" {
l.Namespace = defaults.Namespace
}
return nil
} | go | func (l *SessionRecording) CheckAndSetDefaults() error {
if l.Recording == nil {
return trace.BadParameter("missing parameter Recording")
}
if l.SessionID.IsZero() {
return trace.BadParameter("missing parameter session ID")
}
if l.Namespace == "" {
l.Namespace = defaults.Namespace
}
return nil
} | [
"func",
"(",
"l",
"*",
"SessionRecording",
")",
"CheckAndSetDefaults",
"(",
")",
"error",
"{",
"if",
"l",
".",
"Recording",
"==",
"nil",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"l",
".",
"SessionID",
".",
"IsZero",
"(",
")",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"l",
".",
"Namespace",
"==",
"\"",
"\"",
"{",
"l",
".",
"Namespace",
"=",
"defaults",
".",
"Namespace",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // CheckAndSetDefaults checks and sets default parameters | [
"CheckAndSetDefaults",
"checks",
"and",
"sets",
"default",
"parameters"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L317-L328 | train |
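The CheckAndSetDefaults record above follows a common validate-and-default convention: required fields produce errors, optional fields fall back to package defaults. A hypothetical stand-in type showing the same shape:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// recordingConfig is an illustrative stand-in, not the real type:
// required fields are validated, optional ones are defaulted in place.
type recordingConfig struct {
	Recording io.Reader // required
	SessionID string    // required
	Namespace string    // optional, defaulted
}

const defaultNamespace = "default"

func (c *recordingConfig) CheckAndSetDefaults() error {
	if c.Recording == nil {
		return errors.New("missing parameter Recording")
	}
	if c.SessionID == "" {
		return errors.New("missing parameter session ID")
	}
	if c.Namespace == "" {
		c.Namespace = defaultNamespace
	}
	return nil
}

func main() {
	cfg := recordingConfig{}
	fmt.Println(cfg.CheckAndSetDefaults()) // missing parameter Recording
}
```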
gravitational/teleport | lib/events/auditlog.go | chunkFileNames | func (idx *sessionIndex) chunkFileNames() []string {
fileNames := make([]string, len(idx.chunks))
for i := 0; i < len(idx.chunks); i++ {
fileNames[i] = idx.chunksFileName(i)
}
return fileNames
} | go | func (idx *sessionIndex) chunkFileNames() []string {
fileNames := make([]string, len(idx.chunks))
for i := 0; i < len(idx.chunks); i++ {
fileNames[i] = idx.chunksFileName(i)
}
return fileNames
} | [
"func",
"(",
"idx",
"*",
"sessionIndex",
")",
"chunkFileNames",
"(",
")",
"[",
"]",
"string",
"{",
"fileNames",
":=",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"idx",
".",
"chunks",
")",
")",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"idx",
".",
"chunks",
")",
";",
"i",
"++",
"{",
"fileNames",
"[",
"i",
"]",
"=",
"idx",
".",
"chunksFileName",
"(",
"i",
")",
"\n",
"}",
"\n",
"return",
"fileNames",
"\n",
"}"
] | // chunkFileNames returns file names of all session chunk files | [
"chunkFileNames",
"returns",
"file",
"names",
"of",
"all",
"session",
"chunk",
"files"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L469-L475 | train |
gravitational/teleport | lib/events/auditlog.go | createOrGetDownload | func (l *AuditLog) createOrGetDownload(path string) (context.Context, context.CancelFunc) {
l.Lock()
defer l.Unlock()
ctx, ok := l.activeDownloads[path]
if ok {
return ctx, nil
}
ctx, cancel := context.WithCancel(context.TODO())
l.activeDownloads[path] = ctx
return ctx, func() {
cancel()
l.Lock()
defer l.Unlock()
delete(l.activeDownloads, path)
}
} | go | func (l *AuditLog) createOrGetDownload(path string) (context.Context, context.CancelFunc) {
l.Lock()
defer l.Unlock()
ctx, ok := l.activeDownloads[path]
if ok {
return ctx, nil
}
ctx, cancel := context.WithCancel(context.TODO())
l.activeDownloads[path] = ctx
return ctx, func() {
cancel()
l.Lock()
defer l.Unlock()
delete(l.activeDownloads, path)
}
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"createOrGetDownload",
"(",
"path",
"string",
")",
"(",
"context",
".",
"Context",
",",
"context",
".",
"CancelFunc",
")",
"{",
"l",
".",
"Lock",
"(",
")",
"\n",
"defer",
"l",
".",
"Unlock",
"(",
")",
"\n",
"ctx",
",",
"ok",
":=",
"l",
".",
"activeDownloads",
"[",
"path",
"]",
"\n",
"if",
"ok",
"{",
"return",
"ctx",
",",
"nil",
"\n",
"}",
"\n",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"context",
".",
"TODO",
"(",
")",
")",
"\n",
"l",
".",
"activeDownloads",
"[",
"path",
"]",
"=",
"ctx",
"\n",
"return",
"ctx",
",",
"func",
"(",
")",
"{",
"cancel",
"(",
")",
"\n",
"l",
".",
"Lock",
"(",
")",
"\n",
"defer",
"l",
".",
"Unlock",
"(",
")",
"\n",
"delete",
"(",
"l",
".",
"activeDownloads",
",",
"path",
")",
"\n",
"}",
"\n",
"}"
] | // createOrGetDownload creates a new download sync entry for a given session,
// if there is no active download in progress, or returns an existing one.
// If a new context has been created, a cancel function is returned as a
// second argument. The caller should call this function to signal that the download has been
// completed or failed. | [
"createOrGetDownload",
"creates",
"a",
"new",
"download",
"sync",
"entry",
"for",
"a",
"given",
"session",
"if",
"there",
"is",
"no",
"active",
"download",
"in",
"progress",
"or",
"returns",
"an",
"existing",
"one",
".",
"if",
"the",
"new",
"context",
"has",
"been",
"created",
"cancel",
"function",
"is",
"returned",
"as",
"a",
"second",
"argument",
".",
"Caller",
"should",
"call",
"this",
"function",
"to",
"signal",
"that",
"download",
"has",
"been",
"completed",
"or",
"failed",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L560-L575 | train |
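The createOrGetDownload record above deduplicates concurrent downloads per key: the first caller owns the work and receives a cancel function, later callers get the existing context and no cancel function. A standalone sketch of that pattern, using only a mutex-guarded map of contexts:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// downloads deduplicates concurrent work per key, mirroring the
// record above; the type and field names are illustrative.
type downloads struct {
	mu     sync.Mutex
	active map[string]context.Context
}

func (d *downloads) createOrGet(key string) (context.Context, context.CancelFunc) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if ctx, ok := d.active[key]; ok {
		// Someone else already owns this download; no cancel func for us.
		return ctx, nil
	}
	ctx, cancel := context.WithCancel(context.Background())
	d.active[key] = ctx
	return ctx, func() {
		cancel()
		d.mu.Lock()
		defer d.mu.Unlock()
		delete(d.active, key)
	}
}

func main() {
	d := &downloads{active: make(map[string]context.Context)}
	ctx, cancel := d.createOrGet("session-1")
	_, second := d.createOrGet("session-1")
	fmt.Println(second == nil) // true: a download is already in progress
	cancel()
	<-ctx.Done() // waiters are released once the owner finishes
	fmt.Println("done")
}
```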
gravitational/teleport | lib/events/auditlog.go | GetSessionChunk | func (l *AuditLog) GetSessionChunk(namespace string, sid session.ID, offsetBytes, maxBytes int) ([]byte, error) {
if l.UploadHandler != nil {
if err := l.downloadSession(namespace, sid); err != nil {
return nil, trace.Wrap(err)
}
}
var data []byte
for {
out, err := l.getSessionChunk(namespace, sid, offsetBytes, maxBytes)
if err != nil {
if err == io.EOF {
return data, nil
}
return nil, trace.Wrap(err)
}
data = append(data, out...)
if len(data) == maxBytes || len(out) == 0 {
return data, nil
}
maxBytes = maxBytes - len(out)
offsetBytes = offsetBytes + len(out)
}
} | go | func (l *AuditLog) GetSessionChunk(namespace string, sid session.ID, offsetBytes, maxBytes int) ([]byte, error) {
if l.UploadHandler != nil {
if err := l.downloadSession(namespace, sid); err != nil {
return nil, trace.Wrap(err)
}
}
var data []byte
for {
out, err := l.getSessionChunk(namespace, sid, offsetBytes, maxBytes)
if err != nil {
if err == io.EOF {
return data, nil
}
return nil, trace.Wrap(err)
}
data = append(data, out...)
if len(data) == maxBytes || len(out) == 0 {
return data, nil
}
maxBytes = maxBytes - len(out)
offsetBytes = offsetBytes + len(out)
}
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"GetSessionChunk",
"(",
"namespace",
"string",
",",
"sid",
"session",
".",
"ID",
",",
"offsetBytes",
",",
"maxBytes",
"int",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"if",
"l",
".",
"UploadHandler",
"!=",
"nil",
"{",
"if",
"err",
":=",
"l",
".",
"downloadSession",
"(",
"namespace",
",",
"sid",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"var",
"data",
"[",
"]",
"byte",
"\n",
"for",
"{",
"out",
",",
"err",
":=",
"l",
".",
"getSessionChunk",
"(",
"namespace",
",",
"sid",
",",
"offsetBytes",
",",
"maxBytes",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"if",
"err",
"==",
"io",
".",
"EOF",
"{",
"return",
"data",
",",
"nil",
"\n",
"}",
"\n",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"data",
"=",
"append",
"(",
"data",
",",
"out",
"...",
")",
"\n",
"if",
"len",
"(",
"data",
")",
"==",
"maxBytes",
"||",
"len",
"(",
"out",
")",
"==",
"0",
"{",
"return",
"data",
",",
"nil",
"\n",
"}",
"\n",
"maxBytes",
"=",
"maxBytes",
"-",
"len",
"(",
"out",
")",
"\n",
"offsetBytes",
"=",
"offsetBytes",
"+",
"len",
"(",
"out",
")",
"\n",
"}",
"\n",
"}"
] | // GetSessionChunk returns a reader which console and web clients request
// to receive a live stream of a given session. The reader allows access to a
// session stream range from offsetBytes to offsetBytes+maxBytes | [
"GetSessionChunk",
"returns",
"a",
"reader",
"which",
"console",
"and",
"web",
"clients",
"request",
"to",
"receive",
"a",
"live",
"stream",
"of",
"a",
"given",
"session",
".",
"The",
"reader",
"allows",
"access",
"to",
"a",
"session",
"stream",
"range",
"from",
"offsetBytes",
"to",
"offsetBytes",
"+",
"maxBytes"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L646-L668 | train |
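The GetSessionChunk record above accumulates repeated fetches until the caller's byte budget is filled or the source runs dry. Below is a simplified, self-contained sketch of that accumulation loop against an in-memory source; it illustrates the loop shape rather than copying the real backend call line for line.

```go
package main

import "fmt"

// fetch is a stand-in for a backend call that returns at most max bytes
// of a stream starting at offset; it returns nil at end of data.
func fetch(src []byte, offset, max int) []byte {
	if offset >= len(src) || max <= 0 {
		return nil
	}
	if max > 3 {
		max = 3 // simulate a backend that serves small chunks
	}
	end := offset + max
	if end > len(src) {
		end = len(src)
	}
	return src[offset:end]
}

// readRange keeps fetching until the requested byte budget is met or
// the source is exhausted -- the accumulation idea in the record above.
func readRange(src []byte, offset, max int) []byte {
	var data []byte
	for len(data) < max {
		out := fetch(src, offset+len(data), max-len(data))
		if len(out) == 0 {
			break
		}
		data = append(data, out...)
	}
	return data
}

func main() {
	src := []byte("0123456789")
	fmt.Printf("%s\n", readRange(src, 2, 5)) // 23456
}
```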
gravitational/teleport | lib/events/auditlog.go | EmitAuditEvent | func (l *AuditLog) EmitAuditEvent(event Event, fields EventFields) error {
// If an external logger has been set, use it as the emitter, otherwise
// fallback to the local disk based emitter.
var emitAuditEvent func(event Event, fields EventFields) error
if l.ExternalLog != nil {
emitAuditEvent = l.ExternalLog.EmitAuditEvent
} else {
emitAuditEvent = l.localLog.EmitAuditEvent
}
// Emit the event. If it fails for any reason a Prometheus counter is
// incremented.
err := emitAuditEvent(event, fields)
if err != nil {
auditFailedEmit.Inc()
return trace.Wrap(err)
}
return nil
} | go | func (l *AuditLog) EmitAuditEvent(event Event, fields EventFields) error {
// If an external logger has been set, use it as the emitter, otherwise
// fallback to the local disk based emitter.
var emitAuditEvent func(event Event, fields EventFields) error
if l.ExternalLog != nil {
emitAuditEvent = l.ExternalLog.EmitAuditEvent
} else {
emitAuditEvent = l.localLog.EmitAuditEvent
}
// Emit the event. If it fails for any reason a Prometheus counter is
// incremented.
err := emitAuditEvent(event, fields)
if err != nil {
auditFailedEmit.Inc()
return trace.Wrap(err)
}
return nil
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"EmitAuditEvent",
"(",
"event",
"Event",
",",
"fields",
"EventFields",
")",
"error",
"{",
"// If an external logger has been set, use it as the emitter, otherwise",
"// fallback to the local disk based emitter.",
"var",
"emitAuditEvent",
"func",
"(",
"event",
"Event",
",",
"fields",
"EventFields",
")",
"error",
"\n",
"if",
"l",
".",
"ExternalLog",
"!=",
"nil",
"{",
"emitAuditEvent",
"=",
"l",
".",
"ExternalLog",
".",
"EmitAuditEvent",
"\n",
"}",
"else",
"{",
"emitAuditEvent",
"=",
"l",
".",
"localLog",
".",
"EmitAuditEvent",
"\n",
"}",
"\n\n",
"// Emit the event. If it fails for any reason a Prometheus counter is",
"// incremented.",
"err",
":=",
"emitAuditEvent",
"(",
"event",
",",
"fields",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"auditFailedEmit",
".",
"Inc",
"(",
")",
"\n",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // EmitAuditEvent adds a new event to the log. If emitting fails, a Prometheus
// counter is incremented. | [
"EmitAuditEvent",
"adds",
"a",
"new",
"event",
"to",
"the",
"log",
".",
"If",
"emitting",
"fails",
"a",
"Prometheus",
"counter",
"is",
"incremented",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L879-L898 | train |
gravitational/teleport | lib/events/auditlog.go | emitEvent | func (l *AuditLog) emitEvent(e AuditLogEvent) {
if l.EventsC == nil {
return
}
select {
case l.EventsC <- &e:
return
default:
l.Warningf("Blocked on the events channel.")
}
} | go | func (l *AuditLog) emitEvent(e AuditLogEvent) {
if l.EventsC == nil {
return
}
select {
case l.EventsC <- &e:
return
default:
l.Warningf("Blocked on the events channel.")
}
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"emitEvent",
"(",
"e",
"AuditLogEvent",
")",
"{",
"if",
"l",
".",
"EventsC",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"select",
"{",
"case",
"l",
".",
"EventsC",
"<-",
"&",
"e",
":",
"return",
"\n",
"default",
":",
"l",
".",
"Warningf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"}"
] | // emitEvent emits event for test purposes | [
"emitEvent",
"emits",
"event",
"for",
"test",
"purposes"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L901-L911 | train |
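The emitEvent record above uses a select with a default case so that emitting never blocks the caller. A minimal sketch of that non-blocking send, with a plain string channel standing in for the real event type:

```go
package main

import "fmt"

// tryEmit performs a non-blocking send: if nobody is ready to receive
// and the buffer is full, it warns and drops instead of blocking.
func tryEmit(events chan<- string, e string) {
	if events == nil {
		return // emitting is optional, e.g. only wired up in tests
	}
	select {
	case events <- e:
	default:
		fmt.Println("blocked on the events channel, dropping:", e)
	}
}

func main() {
	ch := make(chan string, 1)
	tryEmit(ch, "first")  // buffered, accepted
	tryEmit(ch, "second") // buffer full, dropped
	fmt.Println(<-ch)     // first
}
```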
gravitational/teleport | lib/events/auditlog.go | auditDirs | func (l *AuditLog) auditDirs() ([]string, error) {
authServers, err := l.getAuthServers()
if err != nil {
return nil, trace.Wrap(err)
}
var out []string
for _, serverID := range authServers {
out = append(out, filepath.Join(l.DataDir, serverID))
}
return out, nil
} | go | func (l *AuditLog) auditDirs() ([]string, error) {
authServers, err := l.getAuthServers()
if err != nil {
return nil, trace.Wrap(err)
}
var out []string
for _, serverID := range authServers {
out = append(out, filepath.Join(l.DataDir, serverID))
}
return out, nil
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"auditDirs",
"(",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"authServers",
",",
"err",
":=",
"l",
".",
"getAuthServers",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"var",
"out",
"[",
"]",
"string",
"\n",
"for",
"_",
",",
"serverID",
":=",
"range",
"authServers",
"{",
"out",
"=",
"append",
"(",
"out",
",",
"filepath",
".",
"Join",
"(",
"l",
".",
"DataDir",
",",
"serverID",
")",
")",
"\n",
"}",
"\n",
"return",
"out",
",",
"nil",
"\n",
"}"
] | // auditDirs returns directories used for audit log storage | [
"auditDirs",
"returns",
"directories",
"used",
"for",
"audit",
"log",
"storage"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L914-L925 | train |
gravitational/teleport | lib/events/auditlog.go | Close | func (l *AuditLog) Close() error {
if l.ExternalLog != nil {
if err := l.ExternalLog.Close(); err != nil {
log.Warningf("Close failure: %v", err)
}
}
l.cancel()
l.Lock()
defer l.Unlock()
if l.localLog != nil {
if err := l.localLog.Close(); err != nil {
log.Warningf("Close failure: %v", err)
}
l.localLog = nil
}
return nil
} | go | func (l *AuditLog) Close() error {
if l.ExternalLog != nil {
if err := l.ExternalLog.Close(); err != nil {
log.Warningf("Close failure: %v", err)
}
}
l.cancel()
l.Lock()
defer l.Unlock()
if l.localLog != nil {
if err := l.localLog.Close(); err != nil {
log.Warningf("Close failure: %v", err)
}
l.localLog = nil
}
return nil
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"Close",
"(",
")",
"error",
"{",
"if",
"l",
".",
"ExternalLog",
"!=",
"nil",
"{",
"if",
"err",
":=",
"l",
".",
"ExternalLog",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"log",
".",
"Warningf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"l",
".",
"cancel",
"(",
")",
"\n",
"l",
".",
"Lock",
"(",
")",
"\n",
"defer",
"l",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"l",
".",
"localLog",
"!=",
"nil",
"{",
"if",
"err",
":=",
"l",
".",
"localLog",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"log",
".",
"Warningf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"l",
".",
"localLog",
"=",
"nil",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Closes the audit log, which includes closing all file handles and releasing
// all session loggers | [
"Closes",
"the",
"audit",
"log",
"which",
"inluces",
"closing",
"all",
"file",
"handles",
"and",
"releasing",
"all",
"session",
"loggers"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L955-L972 | train |
gravitational/teleport | lib/events/auditlog.go | periodicSpaceMonitor | func (l *AuditLog) periodicSpaceMonitor() {
ticker := time.NewTicker(defaults.DiskAlertInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Find out what percentage of disk space is used. If the syscall fails,
// emit that to prometheus as well.
usedPercent, err := percentUsed(l.DataDir)
if err != nil {
auditFailedDisk.Inc()
log.Warnf("Disk space monitoring failed: %v.", err)
continue
}
// Update prometheus gauge with the percentage disk space used.
auditDiskUsed.Set(usedPercent)
// If used percentage goes above the alerting level, write to logs as well.
if usedPercent > float64(defaults.DiskAlertThreshold) {
log.Warnf("Free disk space for audit log is running low, %v%% of disk used.", usedPercent)
}
case <-l.ctx.Done():
return
}
}
} | go | func (l *AuditLog) periodicSpaceMonitor() {
ticker := time.NewTicker(defaults.DiskAlertInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Find out what percentage of disk space is used. If the syscall fails,
// emit that to prometheus as well.
usedPercent, err := percentUsed(l.DataDir)
if err != nil {
auditFailedDisk.Inc()
log.Warnf("Disk space monitoring failed: %v.", err)
continue
}
// Update prometheus gauge with the percentage disk space used.
auditDiskUsed.Set(usedPercent)
// If used percentage goes above the alerting level, write to logs as well.
if usedPercent > float64(defaults.DiskAlertThreshold) {
log.Warnf("Free disk space for audit log is running low, %v%% of disk used.", usedPercent)
}
case <-l.ctx.Done():
return
}
}
} | [
"func",
"(",
"l",
"*",
"AuditLog",
")",
"periodicSpaceMonitor",
"(",
")",
"{",
"ticker",
":=",
"time",
".",
"NewTicker",
"(",
"defaults",
".",
"DiskAlertInterval",
")",
"\n",
"defer",
"ticker",
".",
"Stop",
"(",
")",
"\n\n",
"for",
"{",
"select",
"{",
"case",
"<-",
"ticker",
".",
"C",
":",
"// Find out what percentage of disk space is used. If the syscall fails,",
"// emit that to prometheus as well.",
"usedPercent",
",",
"err",
":=",
"percentUsed",
"(",
"l",
".",
"DataDir",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"auditFailedDisk",
".",
"Inc",
"(",
")",
"\n",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"continue",
"\n",
"}",
"\n\n",
"// Update prometheus gauge with the percentage disk space used.",
"auditDiskUsed",
".",
"Set",
"(",
"usedPercent",
")",
"\n\n",
"// If used percentage goes above the alerting level, write to logs as well.",
"if",
"usedPercent",
">",
"float64",
"(",
"defaults",
".",
"DiskAlertThreshold",
")",
"{",
"log",
".",
"Warnf",
"(",
"\"",
"\"",
",",
"usedPercent",
")",
"\n",
"}",
"\n",
"case",
"<-",
"l",
".",
"ctx",
".",
"Done",
"(",
")",
":",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // periodicSpaceMonitor runs forever, monitoring how much disk space has been
// used on disk. Values are emitted to a Prometheus gauge. | [
"periodicSpaceMonitor",
"run",
"forever",
"monitoring",
"how",
"much",
"disk",
"space",
"has",
"been",
"used",
"on",
"disk",
".",
"Values",
"are",
"emitted",
"to",
"a",
"Prometheus",
"gauge",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/events/auditlog.go#L992-L1019 | train |
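The periodicSpaceMonitor record above combines a time.Ticker with a ctx.Done case so the loop does one check per interval and exits cleanly on shutdown. A runnable sketch of that loop; percentUsed here is a hypothetical stand-in for the real disk-usage helper, and the threshold is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// monitor runs until ctx is cancelled, doing one check per tick.
func monitor(ctx context.Context, interval time.Duration, percentUsed func() (float64, error)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			used, err := percentUsed()
			if err != nil {
				fmt.Println("disk space monitoring failed:", err)
				continue
			}
			if used > 90 { // illustrative alert threshold
				fmt.Printf("free disk space is running low, %.0f%% used\n", used)
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()
	monitor(ctx, 50*time.Millisecond, func() (float64, error) { return 95, nil })
}
```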
gravitational/teleport | lib/backend/legacy/dir/impl.go | New | func New(params legacy.Params) (*Backend, error) {
rootDir := params.GetString("path")
if rootDir == "" {
rootDir = params.GetString("data_dir")
}
if rootDir == "" {
return nil, trace.BadParameter("filesystem backend: 'path' is not set")
}
// Ensure that the path to the root directory exists.
err := os.MkdirAll(rootDir, defaultDirMode)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
bk := &Backend{
InternalClock: clockwork.NewRealClock(),
rootDir: rootDir,
log: logrus.WithFields(logrus.Fields{
trace.Component: "backend:dir",
trace.ComponentFields: logrus.Fields{
"dir": rootDir,
},
}),
}
// DELETE IN: 2.8.0
// Migrate data to new flat keyspace backend.
err = migrate(rootDir, bk)
if err != nil {
return nil, trace.Wrap(err)
}
return bk, nil
} | go | func New(params legacy.Params) (*Backend, error) {
rootDir := params.GetString("path")
if rootDir == "" {
rootDir = params.GetString("data_dir")
}
if rootDir == "" {
return nil, trace.BadParameter("filesystem backend: 'path' is not set")
}
// Ensure that the path to the root directory exists.
err := os.MkdirAll(rootDir, defaultDirMode)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
bk := &Backend{
InternalClock: clockwork.NewRealClock(),
rootDir: rootDir,
log: logrus.WithFields(logrus.Fields{
trace.Component: "backend:dir",
trace.ComponentFields: logrus.Fields{
"dir": rootDir,
},
}),
}
// DELETE IN: 2.8.0
// Migrate data to new flat keyspace backend.
err = migrate(rootDir, bk)
if err != nil {
return nil, trace.Wrap(err)
}
return bk, nil
} | [
"func",
"New",
"(",
"params",
"legacy",
".",
"Params",
")",
"(",
"*",
"Backend",
",",
"error",
")",
"{",
"rootDir",
":=",
"params",
".",
"GetString",
"(",
"\"",
"\"",
")",
"\n",
"if",
"rootDir",
"==",
"\"",
"\"",
"{",
"rootDir",
"=",
"params",
".",
"GetString",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"rootDir",
"==",
"\"",
"\"",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"// Ensure that the path to the root directory exists.",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"rootDir",
",",
"defaultDirMode",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"bk",
":=",
"&",
"Backend",
"{",
"InternalClock",
":",
"clockwork",
".",
"NewRealClock",
"(",
")",
",",
"rootDir",
":",
"rootDir",
",",
"log",
":",
"logrus",
".",
"WithFields",
"(",
"logrus",
".",
"Fields",
"{",
"trace",
".",
"Component",
":",
"\"",
"\"",
",",
"trace",
".",
"ComponentFields",
":",
"logrus",
".",
"Fields",
"{",
"\"",
"\"",
":",
"rootDir",
",",
"}",
",",
"}",
")",
",",
"}",
"\n\n",
"// DELETE IN: 2.8.0",
"// Migrate data to new flat keyspace backend.",
"err",
"=",
"migrate",
"(",
"rootDir",
",",
"bk",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"bk",
",",
"nil",
"\n",
"}"
] | // New creates a new instance of a directory based backend that implements
// backend.Backend. | [
"New",
"creates",
"a",
"new",
"instance",
"of",
"a",
"directory",
"based",
"backend",
"that",
"implements",
"backend",
".",
"Backend",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L77-L111 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | GetKeys | func (bk *Backend) GetKeys(bucket []string) ([]string, error) {
// Get all the key/value pairs for this bucket.
items, err := bk.GetItems(bucket)
if err != nil {
return nil, trace.Wrap(err)
}
// Return only the keys, the keys are already sorted by GetItems.
keys := make([]string, len(items))
for i, e := range items {
keys[i] = e.Key
}
return keys, nil
} | go | func (bk *Backend) GetKeys(bucket []string) ([]string, error) {
// Get all the key/value pairs for this bucket.
items, err := bk.GetItems(bucket)
if err != nil {
return nil, trace.Wrap(err)
}
// Return only the keys, the keys are already sorted by GetItems.
keys := make([]string, len(items))
for i, e := range items {
keys[i] = e.Key
}
return keys, nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"GetKeys",
"(",
"bucket",
"[",
"]",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"// Get all the key/value pairs for this bucket.",
"items",
",",
"err",
":=",
"bk",
".",
"GetItems",
"(",
"bucket",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Return only the keys, the keys are already sorted by GetItems.",
"keys",
":=",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"items",
")",
")",
"\n",
"for",
"i",
",",
"e",
":=",
"range",
"items",
"{",
"keys",
"[",
"i",
"]",
"=",
"e",
".",
"Key",
"\n",
"}",
"\n\n",
"return",
"keys",
",",
"nil",
"\n",
"}"
] | // GetKeys returns a list of keys for a given bucket. | [
"GetKeys",
"returns",
"a",
"list",
"of",
"keys",
"for",
"a",
"given",
"bucket",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L119-L133 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | GetVal | func (bk *Backend) GetVal(bucket []string, key string) ([]byte, error) {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_RDWR)
if err != nil {
// GetVal on a bucket needs to return trace.BadParameter. If opening the
// bucket failed a partial match up to a bucket may still exist. To support
// returning trace.BadParameter in this situation, loop over all keys in the
// backend and see if any match the prefix. If any match the prefix return
// trace.BadParameter, otherwise return the original error. This is
// consistent with our DynamoDB implementation.
files, er := ioutil.ReadDir(path.Join(bk.rootDir))
if er != nil {
return nil, trace.ConvertSystemError(er)
}
var matched int
for _, fi := range files {
pathToBucket := bk.pathToBucket(fi.Name())
fullBucket := append(bucket, key)
bucketPrefix := bk.flatten(fullBucket)
// Prefix matched, for example if pathToBucket is "/foo/bar/baz" and
// bucketPrefix is "/foo/bar".
if strings.HasPrefix(pathToBucket, bucketPrefix) {
matched = matched + 1
}
}
if matched > 0 {
return nil, trace.BadParameter("%v is not a valid key", key)
}
return nil, trace.ConvertSystemError(err)
}
defer b.Close()
// If the key does not exist, return trace.NotFound right away.
item, ok := b.getItem(key)
if !ok {
return nil, trace.NotFound("key %q is not found", key)
}
// If the key is expired, remove it from the bucket and write it out and exit.
if bk.isExpired(item) {
b.deleteItem(key)
return nil, trace.NotFound("key %q is not found", key)
}
return item.Value, nil
} | go | func (bk *Backend) GetVal(bucket []string, key string) ([]byte, error) {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_RDWR)
if err != nil {
// GetVal on a bucket needs to return trace.BadParameter. If opening the
// bucket failed a partial match up to a bucket may still exist. To support
// returning trace.BadParameter in this situation, loop over all keys in the
// backend and see if any match the prefix. If any match the prefix return
// trace.BadParameter, otherwise return the original error. This is
// consistent with our DynamoDB implementation.
files, er := ioutil.ReadDir(path.Join(bk.rootDir))
if er != nil {
return nil, trace.ConvertSystemError(er)
}
var matched int
for _, fi := range files {
pathToBucket := bk.pathToBucket(fi.Name())
fullBucket := append(bucket, key)
bucketPrefix := bk.flatten(fullBucket)
// Prefix matched, for example if pathToBucket is "/foo/bar/baz" and
// bucketPrefix is "/foo/bar".
if strings.HasPrefix(pathToBucket, bucketPrefix) {
matched = matched + 1
}
}
if matched > 0 {
return nil, trace.BadParameter("%v is not a valid key", key)
}
return nil, trace.ConvertSystemError(err)
}
defer b.Close()
// If the key does not exist, return trace.NotFound right away.
item, ok := b.getItem(key)
if !ok {
return nil, trace.NotFound("key %q is not found", key)
}
// If the key is expired, remove it from the bucket and write it out and exit.
if bk.isExpired(item) {
b.deleteItem(key)
return nil, trace.NotFound("key %q is not found", key)
}
return item.Value, nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"GetVal",
"(",
"bucket",
"[",
"]",
"string",
",",
"key",
"string",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"// Open the bucket to work on the items.",
"b",
",",
"err",
":=",
"bk",
".",
"openBucket",
"(",
"bk",
".",
"flatten",
"(",
"bucket",
")",
",",
"os",
".",
"O_RDWR",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// GetVal on a bucket needs to return trace.BadParameter. If opening the",
"// bucket failed a partial match up to a bucket may still exist. To support",
"// returning trace.BadParameter in this situation, loop over all keys in the",
"// backend and see if any match the prefix. If any match the prefix return",
"// trace.BadParameter, otherwise return the original error. This is",
"// consistent with our DynamoDB implementation.",
"files",
",",
"er",
":=",
"ioutil",
".",
"ReadDir",
"(",
"path",
".",
"Join",
"(",
"bk",
".",
"rootDir",
")",
")",
"\n",
"if",
"er",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"er",
")",
"\n",
"}",
"\n",
"var",
"matched",
"int",
"\n",
"for",
"_",
",",
"fi",
":=",
"range",
"files",
"{",
"pathToBucket",
":=",
"bk",
".",
"pathToBucket",
"(",
"fi",
".",
"Name",
"(",
")",
")",
"\n",
"fullBucket",
":=",
"append",
"(",
"bucket",
",",
"key",
")",
"\n",
"bucketPrefix",
":=",
"bk",
".",
"flatten",
"(",
"fullBucket",
")",
"\n\n",
"// Prefix matched, for example if pathToBucket is \"/foo/bar/baz\" and",
"// bucketPrefix is \"/foo/bar\".",
"if",
"strings",
".",
"HasPrefix",
"(",
"pathToBucket",
",",
"bucketPrefix",
")",
"{",
"matched",
"=",
"matched",
"+",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"matched",
">",
"0",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"key",
")",
"\n",
"}",
"\n",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"defer",
"b",
".",
"Close",
"(",
")",
"\n\n",
"// If the key does not exist, return trace.NotFound right away.",
"item",
",",
"ok",
":=",
"b",
".",
"getItem",
"(",
"key",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"nil",
",",
"trace",
".",
"NotFound",
"(",
"\"",
"\"",
",",
"key",
")",
"\n",
"}",
"\n\n",
"// If the key is expired, remove it from the bucket and write it out and exit.",
"if",
"bk",
".",
"isExpired",
"(",
"item",
")",
"{",
"b",
".",
"deleteItem",
"(",
"key",
")",
"\n\n",
"return",
"nil",
",",
"trace",
".",
"NotFound",
"(",
"\"",
"\"",
",",
"key",
")",
"\n",
"}",
"\n\n",
"return",
"item",
".",
"Value",
",",
"nil",
"\n",
"}"
] | // GetVal returns a value for a given key in the bucket | [
"GetVal",
"return",
"a",
"value",
"for",
"a",
"given",
"key",
"in",
"the",
"bucket"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L327-L374 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | CompareAndSwapVal | func (bk *Backend) CompareAndSwapVal(bucket []string, key string, val []byte, prevVal []byte, ttl time.Duration) error {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_CREATE|os.O_RDWR)
if err != nil {
er := trace.ConvertSystemError(err)
if trace.IsNotFound(er) {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
return trace.Wrap(er)
}
defer b.Close()
// Read in existing key. If it does not exist, is expired, or does not
// match, return trace.CompareFailed.
oldItem, ok := b.getItem(key)
if !ok {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
if bk.isExpired(oldItem) {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
if bytes.Compare(oldItem.Value, prevVal) != 0 {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
// The compare was successful, update the item.
b.updateItem(key, val, ttl)
return nil
} | go | func (bk *Backend) CompareAndSwapVal(bucket []string, key string, val []byte, prevVal []byte, ttl time.Duration) error {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_CREATE|os.O_RDWR)
if err != nil {
er := trace.ConvertSystemError(err)
if trace.IsNotFound(er) {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
return trace.Wrap(er)
}
defer b.Close()
// Read in existing key. If it does not exist, is expired, or does not
// match, return trace.CompareFailed.
oldItem, ok := b.getItem(key)
if !ok {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
if bk.isExpired(oldItem) {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
if bytes.Compare(oldItem.Value, prevVal) != 0 {
return trace.CompareFailed("%v/%v did not match expected value", bucket, key)
}
// The compare was successful, update the item.
b.updateItem(key, val, ttl)
return nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"CompareAndSwapVal",
"(",
"bucket",
"[",
"]",
"string",
",",
"key",
"string",
",",
"val",
"[",
"]",
"byte",
",",
"prevVal",
"[",
"]",
"byte",
",",
"ttl",
"time",
".",
"Duration",
")",
"error",
"{",
"// Open the bucket to work on the items.",
"b",
",",
"err",
":=",
"bk",
".",
"openBucket",
"(",
"bk",
".",
"flatten",
"(",
"bucket",
")",
",",
"os",
".",
"O_CREATE",
"|",
"os",
".",
"O_RDWR",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"er",
":=",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"if",
"trace",
".",
"IsNotFound",
"(",
"er",
")",
"{",
"return",
"trace",
".",
"CompareFailed",
"(",
"\"",
"\"",
",",
"bucket",
",",
"key",
")",
"\n",
"}",
"\n",
"return",
"trace",
".",
"Wrap",
"(",
"er",
")",
"\n",
"}",
"\n",
"defer",
"b",
".",
"Close",
"(",
")",
"\n\n",
"// Read in existing key. If it does not exist, is expired, or does not",
"// match, return trace.CompareFailed.",
"oldItem",
",",
"ok",
":=",
"b",
".",
"getItem",
"(",
"key",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"trace",
".",
"CompareFailed",
"(",
"\"",
"\"",
",",
"bucket",
",",
"key",
")",
"\n",
"}",
"\n",
"if",
"bk",
".",
"isExpired",
"(",
"oldItem",
")",
"{",
"return",
"trace",
".",
"CompareFailed",
"(",
"\"",
"\"",
",",
"bucket",
",",
"key",
")",
"\n",
"}",
"\n",
"if",
"bytes",
".",
"Compare",
"(",
"oldItem",
".",
"Value",
",",
"prevVal",
")",
"!=",
"0",
"{",
"return",
"trace",
".",
"CompareFailed",
"(",
"\"",
"\"",
",",
"bucket",
",",
"key",
")",
"\n",
"}",
"\n\n",
"// The compare was successful, update the item.",
"b",
".",
"updateItem",
"(",
"key",
",",
"val",
",",
"ttl",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // CompareAndSwapVal compares and swaps values in an atomic operation | [
"CompareAndSwapVal",
"compares",
"and",
"swap",
"values",
"in",
"atomic",
"operation"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L377-L406 | train |
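The CompareAndSwapVal record above only writes when the stored value still equals the expected previous value. A sketch of the same compare-and-swap discipline over an in-memory, mutex-guarded map standing in for the file-backed bucket:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
)

var errCompareFailed = errors.New("did not match expected value")

// kv is an illustrative in-memory stand-in for the bucket store.
type kv struct {
	mu    sync.Mutex
	items map[string][]byte
}

// compareAndSwap writes val only if the current value equals prevVal.
func (s *kv) compareAndSwap(key string, val, prevVal []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	old, ok := s.items[key]
	if !ok || !bytes.Equal(old, prevVal) {
		return errCompareFailed
	}
	s.items[key] = val
	return nil
}

func main() {
	s := &kv{items: map[string][]byte{"k": []byte("v1")}}
	fmt.Println(s.compareAndSwap("k", []byte("v2"), []byte("v1"))) // <nil>
	fmt.Println(s.compareAndSwap("k", []byte("v3"), []byte("v1"))) // did not match expected value
}
```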
gravitational/teleport | lib/backend/legacy/dir/impl.go | DeleteKey | func (bk *Backend) DeleteKey(bucket []string, key string) error {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_RDWR)
if err != nil {
return trace.Wrap(err)
}
defer b.Close()
// If the key doesn't exist, return trace.NotFound.
_, ok := b.getItem(key)
if !ok {
return trace.NotFound("key %v not found", key)
}
// Otherwise, delete key.
b.deleteItem(key)
return nil
} | go | func (bk *Backend) DeleteKey(bucket []string, key string) error {
// Open the bucket to work on the items.
b, err := bk.openBucket(bk.flatten(bucket), os.O_RDWR)
if err != nil {
return trace.Wrap(err)
}
defer b.Close()
// If the key doesn't exist, return trace.NotFound.
_, ok := b.getItem(key)
if !ok {
return trace.NotFound("key %v not found", key)
}
// Otherwise, delete key.
b.deleteItem(key)
return nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"DeleteKey",
"(",
"bucket",
"[",
"]",
"string",
",",
"key",
"string",
")",
"error",
"{",
"// Open the bucket to work on the items.",
"b",
",",
"err",
":=",
"bk",
".",
"openBucket",
"(",
"bk",
".",
"flatten",
"(",
"bucket",
")",
",",
"os",
".",
"O_RDWR",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"defer",
"b",
".",
"Close",
"(",
")",
"\n\n",
"// If the key doesn't exist, return trace.NotFound.",
"_",
",",
"ok",
":=",
"b",
".",
"getItem",
"(",
"key",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"trace",
".",
"NotFound",
"(",
"\"",
"\"",
",",
"key",
")",
"\n",
"}",
"\n\n",
"// Otherwise, delete key.",
"b",
".",
"deleteItem",
"(",
"key",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // DeleteKey deletes a key in a bucket. | [
"DeleteKey",
"deletes",
"a",
"key",
"in",
"a",
"bucket",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L409-L427 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | DeleteBucket | func (bk *Backend) DeleteBucket(parent []string, bucket string) error {
fullBucket := append(parent, bucket)
err := os.Remove(bk.flatten(fullBucket))
if err != nil {
return trace.ConvertSystemError(err)
}
return nil
} | go | func (bk *Backend) DeleteBucket(parent []string, bucket string) error {
fullBucket := append(parent, bucket)
err := os.Remove(bk.flatten(fullBucket))
if err != nil {
return trace.ConvertSystemError(err)
}
return nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"DeleteBucket",
"(",
"parent",
"[",
"]",
"string",
",",
"bucket",
"string",
")",
"error",
"{",
"fullBucket",
":=",
"append",
"(",
"parent",
",",
"bucket",
")",
"\n\n",
"err",
":=",
"os",
".",
"Remove",
"(",
"bk",
".",
"flatten",
"(",
"fullBucket",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // DeleteBucket deletes the bucket by a given path. | [
"DeleteBucket",
"deletes",
"the",
"bucket",
"by",
"a",
"given",
"path",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L430-L439 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | AcquireLock | func (bk *Backend) AcquireLock(token string, ttl time.Duration) (err error) {
bk.log.Debugf("AcquireLock(%s)", token)
if err = legacy.ValidateLockTTL(ttl); err != nil {
return trace.Wrap(err)
}
bucket := []string{locksBucket}
for {
// GetVal will clear TTL on a lock
bk.GetVal(bucket, token)
// CreateVal is atomic:
err = bk.CreateVal(bucket, token, []byte{1}, ttl)
if err == nil {
break // success
}
if trace.IsAlreadyExists(err) { // locked? wait and repeat:
bk.Clock().Sleep(250 * time.Millisecond)
continue
}
return trace.ConvertSystemError(err)
}
return nil
} | go | func (bk *Backend) AcquireLock(token string, ttl time.Duration) (err error) {
bk.log.Debugf("AcquireLock(%s)", token)
if err = legacy.ValidateLockTTL(ttl); err != nil {
return trace.Wrap(err)
}
bucket := []string{locksBucket}
for {
// GetVal will clear TTL on a lock
bk.GetVal(bucket, token)
// CreateVal is atomic:
err = bk.CreateVal(bucket, token, []byte{1}, ttl)
if err == nil {
break // success
}
if trace.IsAlreadyExists(err) { // locked? wait and repeat:
bk.Clock().Sleep(250 * time.Millisecond)
continue
}
return trace.ConvertSystemError(err)
}
return nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"AcquireLock",
"(",
"token",
"string",
",",
"ttl",
"time",
".",
"Duration",
")",
"(",
"err",
"error",
")",
"{",
"bk",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"token",
")",
"\n\n",
"if",
"err",
"=",
"legacy",
".",
"ValidateLockTTL",
"(",
"ttl",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"bucket",
":=",
"[",
"]",
"string",
"{",
"locksBucket",
"}",
"\n",
"for",
"{",
"// GetVal will clear TTL on a lock",
"bk",
".",
"GetVal",
"(",
"bucket",
",",
"token",
")",
"\n\n",
"// CreateVal is atomic:",
"err",
"=",
"bk",
".",
"CreateVal",
"(",
"bucket",
",",
"token",
",",
"[",
"]",
"byte",
"{",
"1",
"}",
",",
"ttl",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"break",
"// success",
"\n",
"}",
"\n",
"if",
"trace",
".",
"IsAlreadyExists",
"(",
"err",
")",
"{",
"// locked? wait and repeat:",
"bk",
".",
"Clock",
"(",
")",
".",
"Sleep",
"(",
"250",
"*",
"time",
".",
"Millisecond",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // AcquireLock grabs a lock that will be released automatically in TTL. | [
"AcquireLock",
"grabs",
"a",
"lock",
"that",
"will",
"be",
"released",
"automatically",
"in",
"TTL",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L442-L467 | train |
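The AcquireLock record above loops on an atomic "create if absent" and sleeps briefly whenever the lock is already held. The sketch below shows the same acquire/retry/release shape with a mutex-guarded map standing in for the file-backed bucket; TTL handling is omitted for brevity.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// locker is an illustrative stand-in for the lock bucket.
type locker struct {
	mu    sync.Mutex
	locks map[string]bool
}

// tryCreate is the atomic "create if absent" step.
func (l *locker) tryCreate(token string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locks[token] {
		return false // already exists: someone else holds the lock
	}
	l.locks[token] = true
	return true
}

// acquire retries until the lock can be created.
func (l *locker) acquire(token string) {
	for !l.tryCreate(token) {
		time.Sleep(250 * time.Millisecond) // locked? wait and retry
	}
}

func (l *locker) release(token string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.locks, token)
}

func main() {
	l := &locker{locks: make(map[string]bool)}
	l.acquire("migration")
	go func() { time.Sleep(300 * time.Millisecond); l.release("migration") }()
	l.acquire("migration") // blocks until the goroutine releases it
	fmt.Println("acquired after release")
}
```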
gravitational/teleport | lib/backend/legacy/dir/impl.go | ReleaseLock | func (bk *Backend) ReleaseLock(token string) (err error) {
bk.log.Debugf("ReleaseLock(%s)", token)
if err = bk.DeleteKey([]string{locksBucket}, token); err != nil {
if !os.IsNotExist(err) {
return trace.ConvertSystemError(err)
}
}
return nil
} | go | func (bk *Backend) ReleaseLock(token string) (err error) {
bk.log.Debugf("ReleaseLock(%s)", token)
if err = bk.DeleteKey([]string{locksBucket}, token); err != nil {
if !os.IsNotExist(err) {
return trace.ConvertSystemError(err)
}
}
return nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"ReleaseLock",
"(",
"token",
"string",
")",
"(",
"err",
"error",
")",
"{",
"bk",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"token",
")",
"\n\n",
"if",
"err",
"=",
"bk",
".",
"DeleteKey",
"(",
"[",
"]",
"string",
"{",
"locksBucket",
"}",
",",
"token",
")",
";",
"err",
"!=",
"nil",
"{",
"if",
"!",
"os",
".",
"IsNotExist",
"(",
"err",
")",
"{",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // ReleaseLock forces lock release before TTL. | [
"ReleaseLock",
"forces",
"lock",
"release",
"before",
"TTL",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L470-L479 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | pathToBucket | func (bk *Backend) pathToBucket(bucket string) string {
return filepath.Join(bk.rootDir, bucket)
} | go | func (bk *Backend) pathToBucket(bucket string) string {
return filepath.Join(bk.rootDir, bucket)
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"pathToBucket",
"(",
"bucket",
"string",
")",
"string",
"{",
"return",
"filepath",
".",
"Join",
"(",
"bk",
".",
"rootDir",
",",
"bucket",
")",
"\n",
"}"
] | // pathToBucket prepends the root directory to the bucket returning the full
// path to the bucket on the filesystem. | [
"pathToBucket",
"prepends",
"the",
"root",
"directory",
"to",
"the",
"bucket",
"returning",
"the",
"full",
"path",
"to",
"the",
"bucket",
"on",
"the",
"filesystem",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L483-L485 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | decodeBucket | func decodeBucket(bucket string) ([]string, error) {
decoded, err := url.QueryUnescape(bucket)
if err != nil {
return nil, trace.Wrap(err)
}
return filepath.SplitList(decoded), nil
} | go | func decodeBucket(bucket string) ([]string, error) {
decoded, err := url.QueryUnescape(bucket)
if err != nil {
return nil, trace.Wrap(err)
}
return filepath.SplitList(decoded), nil
} | [
"func",
"decodeBucket",
"(",
"bucket",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"decoded",
",",
"err",
":=",
"url",
".",
"QueryUnescape",
"(",
"bucket",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"filepath",
".",
"SplitList",
"(",
"decoded",
")",
",",
"nil",
"\n",
"}"
] | // decodeBucket decodes bucket into parts of path | [
"decodeBucket",
"decodes",
"bucket",
"into",
"parts",
"of",
"path"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L488-L494 | train |
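The decodeBucket record above unescapes a flat bucket name and splits it on the OS path-list separator. The sketch below pairs it with an assumed inverse (the real flatten helper is not shown in these records), so the round trip is visible end to end:

```go
package main

import (
	"fmt"
	"net/url"
	"path/filepath"
	"strings"
)

// encodeBucket is an assumed inverse of decodeBucket: components are
// joined with the OS path-list separator and percent-encoded so they
// can be stored as a single flat file name.
func encodeBucket(bucket []string) string {
	return url.QueryEscape(strings.Join(bucket, string(filepath.ListSeparator)))
}

// decodeBucket reverses the encoding, mirroring the record above.
func decodeBucket(encoded string) ([]string, error) {
	decoded, err := url.QueryUnescape(encoded)
	if err != nil {
		return nil, err
	}
	return filepath.SplitList(decoded), nil
}

func main() {
	flat := encodeBucket([]string{"namespaces", "default", "nodes"})
	parts, err := decodeBucket(flat)
	fmt.Println(flat)       // e.g. namespaces%3Adefault%3Anodes on Unix
	fmt.Println(parts, err) // [namespaces default nodes] <nil>
}
```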
gravitational/teleport | lib/backend/legacy/dir/impl.go | isExpired | func (bk *Backend) isExpired(bv bucketItem) bool {
if bv.ExpiryTime.IsZero() {
return false
}
return bk.Clock().Now().After(bv.ExpiryTime)
} | go | func (bk *Backend) isExpired(bv bucketItem) bool {
if bv.ExpiryTime.IsZero() {
return false
}
return bk.Clock().Now().After(bv.ExpiryTime)
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"isExpired",
"(",
"bv",
"bucketItem",
")",
"bool",
"{",
"if",
"bv",
".",
"ExpiryTime",
".",
"IsZero",
"(",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"return",
"bk",
".",
"Clock",
"(",
")",
".",
"Now",
"(",
")",
".",
"After",
"(",
"bv",
".",
"ExpiryTime",
")",
"\n",
"}"
] | // isExpired checks if the bucket item is expired or not. | [
"isExpired",
"checks",
"if",
"the",
"bucket",
"item",
"is",
"expired",
"or",
"not",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L509-L514 | train |
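The isExpired record above treats the zero time as "never expires" and otherwise compares against the backend clock. A small sketch of that convention, with the clock injected as a parameter instead of the clockwork interface:

```go
package main

import (
	"fmt"
	"time"
)

// item carries an optional expiry: the zero time means "never expires".
type item struct {
	Value      []byte
	ExpiryTime time.Time
}

// isExpired reports whether the item's expiry has passed. now is
// passed in so tests can substitute a fake clock, as the backend does.
func isExpired(it item, now time.Time) bool {
	if it.ExpiryTime.IsZero() {
		return false
	}
	return now.After(it.ExpiryTime)
}

func main() {
	now := time.Now()
	fmt.Println(isExpired(item{}, now))                                  // false: no TTL set
	fmt.Println(isExpired(item{ExpiryTime: now.Add(-time.Minute)}, now)) // true
	fmt.Println(isExpired(item{ExpiryTime: now.Add(time.Minute)}, now))  // false
}
```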
gravitational/teleport | lib/backend/legacy/dir/impl.go | openBucket | func (bk *Backend) openBucket(prefix string, openFlag int) (*bucket, error) {
// Open bucket with requested flags.
file, err := os.OpenFile(prefix, openFlag, defaultFileMode)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
// Lock the bucket so no one else can access it.
if err := utils.FSWriteLock(file); err != nil {
return nil, trace.Wrap(err)
}
// Read in all items from the bucket.
items, err := readBucket(file)
if err != nil {
return nil, trace.Wrap(err)
}
return &bucket{
backend: bk,
items: items,
file: file,
}, nil
} | go | func (bk *Backend) openBucket(prefix string, openFlag int) (*bucket, error) {
// Open bucket with requested flags.
file, err := os.OpenFile(prefix, openFlag, defaultFileMode)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
// Lock the bucket so no one else can access it.
if err := utils.FSWriteLock(file); err != nil {
return nil, trace.Wrap(err)
}
// Read in all items from the bucket.
items, err := readBucket(file)
if err != nil {
return nil, trace.Wrap(err)
}
return &bucket{
backend: bk,
items: items,
file: file,
}, nil
} | [
"func",
"(",
"bk",
"*",
"Backend",
")",
"openBucket",
"(",
"prefix",
"string",
",",
"openFlag",
"int",
")",
"(",
"*",
"bucket",
",",
"error",
")",
"{",
"// Open bucket with requested flags.",
"file",
",",
"err",
":=",
"os",
".",
"OpenFile",
"(",
"prefix",
",",
"openFlag",
",",
"defaultFileMode",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Lock the bucket so no one else can access it.",
"if",
"err",
":=",
"utils",
".",
"FSWriteLock",
"(",
"file",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Read in all items from the bucket.",
"items",
",",
"err",
":=",
"readBucket",
"(",
"file",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"&",
"bucket",
"{",
"backend",
":",
"bk",
",",
"items",
":",
"items",
",",
"file",
":",
"file",
",",
"}",
",",
"nil",
"\n",
"}"
] | // openBucket will open a file, lock it, and then read in all the items in
// the bucket. | [
"openBucket",
"will",
"open",
"a",
"file",
"lock",
"it",
"and",
"then",
"read",
"in",
"all",
"the",
"items",
"in",
"the",
"bucket",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L543-L566 | train |
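Editor's note: utils.FSWriteLock above is a Teleport helper; on Unix an exclusive advisory lock of this kind is typically taken with flock(2). The following Unix-only Go sketch shows that pattern with syscall.Flock; the helper name, path, and error handling are illustrative assumptions, not Teleport's implementation.

//go:build unix

package main

import (
	"fmt"
	"os"
	"syscall"
)

// withExclusiveLock opens path, takes an exclusive advisory lock on it,
// runs fn, then releases the lock and closes the file.
func withExclusiveLock(path string, fn func(*os.File) error) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	// LOCK_EX blocks until no other process holds the lock on this file.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
		return err
	}
	defer syscall.Flock(int(f.Fd()), syscall.LOCK_UN)

	return fn(f)
}

func main() {
	err := withExclusiveLock("/tmp/bucket.json", func(f *os.File) error {
		fmt.Println("holding exclusive lock on", f.Name())
		return nil
	})
	if err != nil {
		fmt.Println("lock failed:", err)
	}
}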
gravitational/teleport | lib/backend/legacy/dir/impl.go | readBucket | func readBucket(f *os.File) (map[string]bucketItem, error) {
// If the file is empty, return an empty bucket.
ok, err := isEmpty(f)
if err != nil {
return nil, trace.Wrap(err)
}
if ok {
return map[string]bucketItem{}, nil
}
// The file is not empty, read it into a map.
var items map[string]bucketItem
bytes, err := ioutil.ReadAll(f)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = utils.FastUnmarshal(bytes, &items)
if err != nil {
return nil, trace.Wrap(err)
}
return items, nil
} | go | func readBucket(f *os.File) (map[string]bucketItem, error) {
// If the file is empty, return an empty bucket.
ok, err := isEmpty(f)
if err != nil {
return nil, trace.Wrap(err)
}
if ok {
return map[string]bucketItem{}, nil
}
// The file is not empty, read it into a map.
var items map[string]bucketItem
bytes, err := ioutil.ReadAll(f)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
err = utils.FastUnmarshal(bytes, &items)
if err != nil {
return nil, trace.Wrap(err)
}
return items, nil
} | [
"func",
"readBucket",
"(",
"f",
"*",
"os",
".",
"File",
")",
"(",
"map",
"[",
"string",
"]",
"bucketItem",
",",
"error",
")",
"{",
"// If the file is empty, return an empty bucket.",
"ok",
",",
"err",
":=",
"isEmpty",
"(",
"f",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"ok",
"{",
"return",
"map",
"[",
"string",
"]",
"bucketItem",
"{",
"}",
",",
"nil",
"\n",
"}",
"\n\n",
"// The file is not empty, read it into a map.",
"var",
"items",
"map",
"[",
"string",
"]",
"bucketItem",
"\n",
"bytes",
",",
"err",
":=",
"ioutil",
".",
"ReadAll",
"(",
"f",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"err",
"=",
"utils",
".",
"FastUnmarshal",
"(",
"bytes",
",",
"&",
"items",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"items",
",",
"nil",
"\n",
"}"
] | // readBucket will read in the bucket and return a map of keys. An error is
// returned if the bucket cannot be read. | [
"readBucket",
"will",
"read",
"in",
"the",
"bucket",
"and",
"return",
"a",
"map",
"of",
"keys",
".",
"The",
"second",
"return",
"value",
"returns",
"true",
"to",
"false",
"to",
"indicate",
"if",
"the",
"file",
"was",
"empty",
"or",
"not",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L617-L639 | train |
gravitational/teleport | lib/backend/legacy/dir/impl.go | writeBucket | func writeBucket(f *os.File, items map[string]bucketItem) error {
// Marshal items to disk format.
bytes, err := json.Marshal(items)
if err != nil {
return trace.Wrap(err)
}
// Truncate the file.
if _, err := f.Seek(0, 0); err != nil {
return trace.ConvertSystemError(err)
}
if err := f.Truncate(0); err != nil {
return trace.ConvertSystemError(err)
}
// Write out the contents to disk.
n, err := f.Write(bytes)
if err == nil && n < len(bytes) {
return trace.Wrap(io.ErrShortWrite)
}
return nil
} | go | func writeBucket(f *os.File, items map[string]bucketItem) error {
// Marshal items to disk format.
bytes, err := json.Marshal(items)
if err != nil {
return trace.Wrap(err)
}
// Truncate the file.
if _, err := f.Seek(0, 0); err != nil {
return trace.ConvertSystemError(err)
}
if err := f.Truncate(0); err != nil {
return trace.ConvertSystemError(err)
}
// Write out the contents to disk.
n, err := f.Write(bytes)
if err == nil && n < len(bytes) {
return trace.Wrap(io.ErrShortWrite)
}
return nil
} | [
"func",
"writeBucket",
"(",
"f",
"*",
"os",
".",
"File",
",",
"items",
"map",
"[",
"string",
"]",
"bucketItem",
")",
"error",
"{",
"// Marshal items to disk format.",
"bytes",
",",
"err",
":=",
"json",
".",
"Marshal",
"(",
"items",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Truncate the file.",
"if",
"_",
",",
"err",
":=",
"f",
".",
"Seek",
"(",
"0",
",",
"0",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"f",
".",
"Truncate",
"(",
"0",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"ConvertSystemError",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Write out the contents to disk.",
"n",
",",
"err",
":=",
"f",
".",
"Write",
"(",
"bytes",
")",
"\n",
"if",
"err",
"==",
"nil",
"&&",
"n",
"<",
"len",
"(",
"bytes",
")",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"io",
".",
"ErrShortWrite",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // writeBucket will truncate the file and write out the items to the file f. | [
"writeBucket",
"will",
"truncate",
"the",
"file",
"and",
"write",
"out",
"the",
"items",
"to",
"the",
"file",
"f",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L642-L664 | train |
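Editor's note: the seek/truncate/write sequence in writeBucket above is the standard way to rewrite an already-open (and here already-locked) file in place instead of reopening it; note that the recorded function returns nil even when Write itself fails. A compact stand-alone Go version of the same idiom, with an illustrative payload and error propagation, is below.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// rewriteJSON replaces the entire contents of the open file f with the JSON
// encoding of v, keeping the same file descriptor (and any lock held on it).
func rewriteJSON(f *os.File, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := f.Truncate(0); err != nil {
		return err
	}
	n, err := f.Write(data)
	if err != nil {
		return err
	}
	if n < len(data) {
		return io.ErrShortWrite
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "bucket-*.json")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := rewriteJSON(f, map[string]string{"key": "value"}); err != nil {
		panic(err)
	}
	fmt.Println("rewrote", f.Name())
}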
gravitational/teleport | lib/backend/legacy/dir/impl.go | isEmpty | func isEmpty(f *os.File) (bool, error) {
fi, err := f.Stat()
if err != nil {
return false, trace.Wrap(err)
}
if fi.Size() > 0 {
return false, nil
}
return true, nil
} | go | func isEmpty(f *os.File) (bool, error) {
fi, err := f.Stat()
if err != nil {
return false, trace.Wrap(err)
}
if fi.Size() > 0 {
return false, nil
}
return true, nil
} | [
"func",
"isEmpty",
"(",
"f",
"*",
"os",
".",
"File",
")",
"(",
"bool",
",",
"error",
")",
"{",
"fi",
",",
"err",
":=",
"f",
".",
"Stat",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"false",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"fi",
".",
"Size",
"(",
")",
">",
"0",
"{",
"return",
"false",
",",
"nil",
"\n",
"}",
"\n\n",
"return",
"true",
",",
"nil",
"\n",
"}"
] | // isEmpty checks if the file is empty or not. | [
"isEmpty",
"checks",
"if",
"the",
"file",
"is",
"empty",
"or",
"not",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/legacy/dir/impl.go#L667-L678 | train |
gravitational/teleport | lib/backend/memory/item.go | Less | func (i *btreeItem) Less(iother btree.Item) bool {
switch other := iother.(type) {
case *btreeItem:
return bytes.Compare(i.Key, other.Key) < 0
case *prefixItem:
return !iother.Less(i)
default:
return false
}
} | go | func (i *btreeItem) Less(iother btree.Item) bool {
switch other := iother.(type) {
case *btreeItem:
return bytes.Compare(i.Key, other.Key) < 0
case *prefixItem:
return !iother.Less(i)
default:
return false
}
} | [
"func",
"(",
"i",
"*",
"btreeItem",
")",
"Less",
"(",
"iother",
"btree",
".",
"Item",
")",
"bool",
"{",
"switch",
"other",
":=",
"iother",
".",
"(",
"type",
")",
"{",
"case",
"*",
"btreeItem",
":",
"return",
"bytes",
".",
"Compare",
"(",
"i",
".",
"Key",
",",
"other",
".",
"Key",
")",
"<",
"0",
"\n",
"case",
"*",
"prefixItem",
":",
"return",
"!",
"iother",
".",
"Less",
"(",
"i",
")",
"\n",
"default",
":",
"return",
"false",
"\n",
"}",
"\n",
"}"
] | // Less is used for Btree operations,
// returns true if item is less than the other one | [
"Less",
"is",
"used",
"for",
"Btree",
"operations",
"returns",
"true",
"if",
"item",
"is",
"less",
"than",
"the",
"other",
"one"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/memory/item.go#L37-L46 | train |
gravitational/teleport | lib/backend/memory/item.go | Less | func (p *prefixItem) Less(iother btree.Item) bool {
other := iother.(*btreeItem)
if bytes.HasPrefix(other.Key, p.prefix) {
return false
}
return true
} | go | func (p *prefixItem) Less(iother btree.Item) bool {
other := iother.(*btreeItem)
if bytes.HasPrefix(other.Key, p.prefix) {
return false
}
return true
} | [
"func",
"(",
"p",
"*",
"prefixItem",
")",
"Less",
"(",
"iother",
"btree",
".",
"Item",
")",
"bool",
"{",
"other",
":=",
"iother",
".",
"(",
"*",
"btreeItem",
")",
"\n",
"if",
"bytes",
".",
"HasPrefix",
"(",
"other",
".",
"Key",
",",
"p",
".",
"prefix",
")",
"{",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}"
] | // Less is used for Btree operations | [
"Less",
"is",
"used",
"for",
"Btree",
"operations"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/backend/memory/item.go#L55-L61 | train |
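Editor's note: a byte-ordered B-tree like the one above supports prefix reads by seeking to the prefix and walking forward until keys stop matching. The Go sketch below shows that basic pattern with github.com/google/btree and an illustrative item type; it deliberately does not reproduce the prefixItem sentinel from the records above, whose exact interplay with the tree's range calls is internal to the backend.

package main

import (
	"bytes"
	"fmt"

	"github.com/google/btree"
)

// kvItem is an illustrative tree item ordered by its byte key.
type kvItem struct{ key []byte }

func (a *kvItem) Less(b btree.Item) bool {
	return bytes.Compare(a.key, b.(*kvItem).key) < 0
}

func main() {
	tree := btree.New(8)
	for _, k := range []string{"/nodes/a", "/roles/admin", "/roles/dev", "/users/alice"} {
		tree.ReplaceOrInsert(&kvItem{key: []byte(k)})
	}

	prefix := []byte("/roles")
	// Seek to the first key >= prefix, then stop as soon as a key no longer
	// carries the prefix.
	tree.AscendGreaterOrEqual(&kvItem{key: prefix}, func(i btree.Item) bool {
		item := i.(*kvItem)
		if !bytes.HasPrefix(item.key, prefix) {
			return false // left the prefix range, stop iterating
		}
		fmt.Println(string(item.key))
		return true
	})
}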
gravitational/teleport | lib/services/remotecluster.go | NewRemoteCluster | func NewRemoteCluster(name string) (RemoteCluster, error) {
return &RemoteClusterV3{
Kind: KindRemoteCluster,
Version: V3,
Metadata: Metadata{
Name: name,
Namespace: defaults.Namespace,
},
}, nil
} | go | func NewRemoteCluster(name string) (RemoteCluster, error) {
return &RemoteClusterV3{
Kind: KindRemoteCluster,
Version: V3,
Metadata: Metadata{
Name: name,
Namespace: defaults.Namespace,
},
}, nil
} | [
"func",
"NewRemoteCluster",
"(",
"name",
"string",
")",
"(",
"RemoteCluster",
",",
"error",
")",
"{",
"return",
"&",
"RemoteClusterV3",
"{",
"Kind",
":",
"KindRemoteCluster",
",",
"Version",
":",
"V3",
",",
"Metadata",
":",
"Metadata",
"{",
"Name",
":",
"name",
",",
"Namespace",
":",
"defaults",
".",
"Namespace",
",",
"}",
",",
"}",
",",
"nil",
"\n",
"}"
] | // NewRemoteCluster is a convenient way to create a RemoteCluster resource. | [
"NewRemoteCluster",
"is",
"a",
"convenience",
"wa",
"to",
"create",
"a",
"RemoteCluster",
"resource",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/remotecluster.go#L50-L59 | train |
gravitational/teleport | lib/services/remotecluster.go | SetLastHeartbeat | func (c *RemoteClusterV3) SetLastHeartbeat(t time.Time) {
c.Status.LastHeartbeat = t
} | go | func (c *RemoteClusterV3) SetLastHeartbeat(t time.Time) {
c.Status.LastHeartbeat = t
} | [
"func",
"(",
"c",
"*",
"RemoteClusterV3",
")",
"SetLastHeartbeat",
"(",
"t",
"time",
".",
"Time",
")",
"{",
"c",
".",
"Status",
".",
"LastHeartbeat",
"=",
"t",
"\n",
"}"
] | // SetLastHeartbeat sets last heartbeat of the cluster | [
"SetLastHeartbeat",
"sets",
"last",
"heartbeat",
"of",
"the",
"cluster"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/remotecluster.go#L128-L130 | train |
gravitational/teleport | lib/services/remotecluster.go | String | func (r *RemoteClusterV3) String() string {
return fmt.Sprintf("RemoteCluster(%v, %v)", r.Metadata.Name, r.Status.Connection)
} | go | func (r *RemoteClusterV3) String() string {
return fmt.Sprintf("RemoteCluster(%v, %v)", r.Metadata.Name, r.Status.Connection)
} | [
"func",
"(",
"r",
"*",
"RemoteClusterV3",
")",
"String",
"(",
")",
"string",
"{",
"return",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"r",
".",
"Metadata",
".",
"Name",
",",
"r",
".",
"Status",
".",
"Connection",
")",
"\n",
"}"
] | // String represents a human readable version of remote cluster settings. | [
"String",
"represents",
"a",
"human",
"readable",
"version",
"of",
"remote",
"cluster",
"settings",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/remotecluster.go#L173-L175 | train |
gravitational/teleport | lib/services/remotecluster.go | UnmarshalRemoteCluster | func UnmarshalRemoteCluster(bytes []byte, opts ...MarshalOption) (RemoteCluster, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
var cluster RemoteClusterV3
if len(bytes) == 0 {
return nil, trace.BadParameter("missing resource data")
}
if cfg.SkipValidation {
err := utils.FastUnmarshal(bytes, &cluster)
if err != nil {
return nil, trace.Wrap(err)
}
} else {
err = utils.UnmarshalWithSchema(GetRemoteClusterSchema(), &cluster, bytes)
if err != nil {
return nil, trace.BadParameter(err.Error())
}
}
err = cluster.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
return &cluster, nil
} | go | func UnmarshalRemoteCluster(bytes []byte, opts ...MarshalOption) (RemoteCluster, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
var cluster RemoteClusterV3
if len(bytes) == 0 {
return nil, trace.BadParameter("missing resource data")
}
if cfg.SkipValidation {
err := utils.FastUnmarshal(bytes, &cluster)
if err != nil {
return nil, trace.Wrap(err)
}
} else {
err = utils.UnmarshalWithSchema(GetRemoteClusterSchema(), &cluster, bytes)
if err != nil {
return nil, trace.BadParameter(err.Error())
}
}
err = cluster.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
return &cluster, nil
} | [
"func",
"UnmarshalRemoteCluster",
"(",
"bytes",
"[",
"]",
"byte",
",",
"opts",
"...",
"MarshalOption",
")",
"(",
"RemoteCluster",
",",
"error",
")",
"{",
"cfg",
",",
"err",
":=",
"collectOptions",
"(",
"opts",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"var",
"cluster",
"RemoteClusterV3",
"\n\n",
"if",
"len",
"(",
"bytes",
")",
"==",
"0",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n\n",
"if",
"cfg",
".",
"SkipValidation",
"{",
"err",
":=",
"utils",
".",
"FastUnmarshal",
"(",
"bytes",
",",
"&",
"cluster",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"err",
"=",
"utils",
".",
"UnmarshalWithSchema",
"(",
"GetRemoteClusterSchema",
"(",
")",
",",
"&",
"cluster",
",",
"bytes",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"err",
"=",
"cluster",
".",
"CheckAndSetDefaults",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"&",
"cluster",
",",
"nil",
"\n",
"}"
] | // UnmarshalRemoteCluster unmarshals remote cluster from JSON or YAML. | [
"UnmarshalRemoteCluster",
"unmarshals",
"remote",
"cluster",
"from",
"JSON",
"or",
"YAML",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/remotecluster.go#L207-L237 | train |
gravitational/teleport | lib/services/remotecluster.go | MarshalRemoteCluster | func MarshalRemoteCluster(c RemoteCluster, opts ...MarshalOption) ([]byte, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
switch resource := c.(type) {
case *RemoteClusterV3:
if !cfg.PreserveResourceID {
// avoid modifying the original object
// to prevent unexpected data races
copy := *resource
copy.SetResourceID(0)
resource = ©
}
return utils.FastMarshal(resource)
default:
return nil, trace.BadParameter("unrecognized resource version %T", c)
}
} | go | func MarshalRemoteCluster(c RemoteCluster, opts ...MarshalOption) ([]byte, error) {
cfg, err := collectOptions(opts)
if err != nil {
return nil, trace.Wrap(err)
}
switch resource := c.(type) {
case *RemoteClusterV3:
if !cfg.PreserveResourceID {
// avoid modifying the original object
// to prevent unexpected data races
copy := *resource
copy.SetResourceID(0)
resource = ©
}
return utils.FastMarshal(resource)
default:
return nil, trace.BadParameter("unrecognized resource version %T", c)
}
} | [
"func",
"MarshalRemoteCluster",
"(",
"c",
"RemoteCluster",
",",
"opts",
"...",
"MarshalOption",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"cfg",
",",
"err",
":=",
"collectOptions",
"(",
"opts",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"switch",
"resource",
":=",
"c",
".",
"(",
"type",
")",
"{",
"case",
"*",
"RemoteClusterV3",
":",
"if",
"!",
"cfg",
".",
"PreserveResourceID",
"{",
"// avoid modifying the original object",
"// to prevent unexpected data races",
"copy",
":=",
"*",
"resource",
"\n",
"copy",
".",
"SetResourceID",
"(",
"0",
")",
"\n",
"resource",
"=",
"&",
"copy",
"\n",
"}",
"\n",
"return",
"utils",
".",
"FastMarshal",
"(",
"resource",
")",
"\n",
"default",
":",
"return",
"nil",
",",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"c",
")",
"\n",
"}",
"\n",
"}"
] | // MarshalRemoteCluster marshals remote cluster to JSON. | [
"MarshalRemoteCluster",
"marshals",
"remote",
"cluster",
"to",
"JSON",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/services/remotecluster.go#L240-L258 | train |
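Editor's note: MarshalRemoteCluster above copies the resource before zeroing the resource ID so concurrent readers of the original never observe the mutation. The same defensive-copy idiom in plain Go, with an illustrative struct rather than Teleport's types:

package main

import (
	"encoding/json"
	"fmt"
)

// resource stands in for any struct whose transient fields should not leak
// into the serialized form.
type resource struct {
	Name       string `json:"name"`
	ResourceID int64  `json:"resource_id,omitempty"`
}

// marshalWithoutID serializes r with ResourceID stripped, without touching
// the caller's copy (important when other goroutines may be reading it).
func marshalWithoutID(r *resource) ([]byte, error) {
	cp := *r // shallow copy is enough here: no shared pointers are modified
	cp.ResourceID = 0
	return json.Marshal(&cp)
}

func main() {
	r := &resource{Name: "east", ResourceID: 42}
	out, err := marshalWithoutID(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out), "original still has ID", r.ResourceID)
}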
gravitational/teleport | lib/tlsca/ca.go | New | func New(certPEM, keyPEM []byte) (*CertAuthority, error) {
ca := &CertAuthority{}
var err error
ca.Cert, err = ParseCertificatePEM(certPEM)
if err != nil {
return nil, trace.Wrap(err)
}
if len(keyPEM) != 0 {
ca.Signer, err = ParsePrivateKeyPEM(keyPEM)
if err != nil {
return nil, trace.Wrap(err)
}
}
return ca, nil
} | go | func New(certPEM, keyPEM []byte) (*CertAuthority, error) {
ca := &CertAuthority{}
var err error
ca.Cert, err = ParseCertificatePEM(certPEM)
if err != nil {
return nil, trace.Wrap(err)
}
if len(keyPEM) != 0 {
ca.Signer, err = ParsePrivateKeyPEM(keyPEM)
if err != nil {
return nil, trace.Wrap(err)
}
}
return ca, nil
} | [
"func",
"New",
"(",
"certPEM",
",",
"keyPEM",
"[",
"]",
"byte",
")",
"(",
"*",
"CertAuthority",
",",
"error",
")",
"{",
"ca",
":=",
"&",
"CertAuthority",
"{",
"}",
"\n",
"var",
"err",
"error",
"\n",
"ca",
".",
"Cert",
",",
"err",
"=",
"ParseCertificatePEM",
"(",
"certPEM",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"len",
"(",
"keyPEM",
")",
"!=",
"0",
"{",
"ca",
".",
"Signer",
",",
"err",
"=",
"ParsePrivateKeyPEM",
"(",
"keyPEM",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"ca",
",",
"nil",
"\n",
"}"
] | // New returns new CA from PEM encoded certificate and private
// key. Private Key is optional; if omitted CA won't be able to
// issue new certificates, only verify them | [
"New",
"returns",
"new",
"CA",
"from",
"PEM",
"encoded",
"certificate",
"and",
"private",
"key",
".",
"Private",
"Key",
"is",
"optional",
"if",
"omitted",
"CA",
"won",
"t",
"be",
"able",
"to",
"issue",
"new",
"certificates",
"only",
"verify",
"them"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/tlsca/ca.go#L43-L57 | train |
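Editor's note: ParseCertificatePEM and ParsePrivateKeyPEM referenced above are Teleport helpers. The standard-library Go sketch below shows the same decoding steps (PEM block → DER → parsed object); the helper names and the PKCS#1/EC/PKCS#8 fallback order are assumptions for illustration, not Teleport's exact implementation.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// parseCertificatePEM decodes a single PEM-encoded X.509 certificate.
func parseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return nil, errors.New("no PEM block found")
	}
	return x509.ParseCertificate(block.Bytes)
}

// parsePrivateKeyPEM tries the common private key encodings in turn.
func parsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
	block, _ := pem.Decode(keyPEM)
	if block == nil {
		return nil, errors.New("no PEM block found")
	}
	if k, err := x509.ParsePKCS1PrivateKey(block.Bytes); err == nil {
		return k, nil
	}
	if k, err := x509.ParseECPrivateKey(block.Bytes); err == nil {
		return k, nil
	}
	k, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	signer, ok := k.(crypto.Signer)
	if !ok {
		return nil, errors.New("key does not implement crypto.Signer")
	}
	return signer, nil
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	signer, err := parsePrivateKeyPEM(keyPEM)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %T from PEM\n", signer)
	_ = parseCertificatePEM // certificate parsing follows the same pattern
}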
gravitational/teleport | lib/tlsca/ca.go | Subject | func (id *Identity) Subject() pkix.Name {
subject := pkix.Name{
CommonName: id.Username,
}
subject.Organization = append([]string{}, id.Groups...)
subject.OrganizationalUnit = append([]string{}, id.Usage...)
subject.Locality = append([]string{}, id.Principals...)
subject.Province = append([]string{}, id.KubernetesGroups...)
return subject
} | go | func (id *Identity) Subject() pkix.Name {
subject := pkix.Name{
CommonName: id.Username,
}
subject.Organization = append([]string{}, id.Groups...)
subject.OrganizationalUnit = append([]string{}, id.Usage...)
subject.Locality = append([]string{}, id.Principals...)
subject.Province = append([]string{}, id.KubernetesGroups...)
return subject
} | [
"func",
"(",
"id",
"*",
"Identity",
")",
"Subject",
"(",
")",
"pkix",
".",
"Name",
"{",
"subject",
":=",
"pkix",
".",
"Name",
"{",
"CommonName",
":",
"id",
".",
"Username",
",",
"}",
"\n",
"subject",
".",
"Organization",
"=",
"append",
"(",
"[",
"]",
"string",
"{",
"}",
",",
"id",
".",
"Groups",
"...",
")",
"\n",
"subject",
".",
"OrganizationalUnit",
"=",
"append",
"(",
"[",
"]",
"string",
"{",
"}",
",",
"id",
".",
"Usage",
"...",
")",
"\n",
"subject",
".",
"Locality",
"=",
"append",
"(",
"[",
"]",
"string",
"{",
"}",
",",
"id",
".",
"Principals",
"...",
")",
"\n",
"subject",
".",
"Province",
"=",
"append",
"(",
"[",
"]",
"string",
"{",
"}",
",",
"id",
".",
"KubernetesGroups",
"...",
")",
"\n",
"return",
"subject",
"\n",
"}"
] | // Subject converts identity to X.509 subject name | [
"Subject",
"converts",
"identity",
"to",
"X",
".",
"509",
"subject",
"name"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/tlsca/ca.go#L93-L102 | train |
gravitational/teleport | lib/tlsca/ca.go | FromSubject | func FromSubject(subject pkix.Name) (*Identity, error) {
i := &Identity{
Username: subject.CommonName,
Groups: subject.Organization,
Usage: subject.OrganizationalUnit,
Principals: subject.Locality,
KubernetesGroups: subject.Province,
}
if err := i.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
return i, nil
} | go | func FromSubject(subject pkix.Name) (*Identity, error) {
i := &Identity{
Username: subject.CommonName,
Groups: subject.Organization,
Usage: subject.OrganizationalUnit,
Principals: subject.Locality,
KubernetesGroups: subject.Province,
}
if err := i.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
return i, nil
} | [
"func",
"FromSubject",
"(",
"subject",
"pkix",
".",
"Name",
")",
"(",
"*",
"Identity",
",",
"error",
")",
"{",
"i",
":=",
"&",
"Identity",
"{",
"Username",
":",
"subject",
".",
"CommonName",
",",
"Groups",
":",
"subject",
".",
"Organization",
",",
"Usage",
":",
"subject",
".",
"OrganizationalUnit",
",",
"Principals",
":",
"subject",
".",
"Locality",
",",
"KubernetesGroups",
":",
"subject",
".",
"Province",
",",
"}",
"\n",
"if",
"err",
":=",
"i",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"i",
",",
"nil",
"\n",
"}"
] | // FromSubject returns identity from subject name | [
"FromSubject",
"returns",
"identity",
"from",
"subject",
"name"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/tlsca/ca.go#L105-L117 | train |
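Editor's note: Subject and FromSubject above define a simple bidirectional mapping between an identity and standard X.509 subject fields (Organization carries groups, OrganizationalUnit carries usage, Locality carries principals, Province carries Kubernetes groups). The Go round trip below reproduces that mapping using only crypto/x509/pkix, with an illustrative identity struct rather than Teleport's.

package main

import (
	"crypto/x509/pkix"
	"fmt"
)

// identity mirrors the fields packed into the certificate subject.
type identity struct {
	Username         string
	Groups           []string
	Usage            []string
	Principals       []string
	KubernetesGroups []string
}

// toSubject packs the identity into a pkix.Name.
func (id identity) toSubject() pkix.Name {
	return pkix.Name{
		CommonName:         id.Username,
		Organization:       append([]string{}, id.Groups...),
		OrganizationalUnit: append([]string{}, id.Usage...),
		Locality:           append([]string{}, id.Principals...),
		Province:           append([]string{}, id.KubernetesGroups...),
	}
}

// fromSubject recovers the identity from a pkix.Name.
func fromSubject(subject pkix.Name) identity {
	return identity{
		Username:         subject.CommonName,
		Groups:           subject.Organization,
		Usage:            subject.OrganizationalUnit,
		Principals:       subject.Locality,
		KubernetesGroups: subject.Province,
	}
}

func main() {
	in := identity{
		Username:   "alice",
		Groups:     []string{"admin"},
		Principals: []string{"root", "alice"},
	}
	out := fromSubject(in.toSubject())
	fmt.Printf("%+v\n", out)
}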
gravitational/teleport | lib/tlsca/ca.go | GenerateCertificate | func (ca *CertAuthority) GenerateCertificate(req CertificateRequest) ([]byte, error) {
if err := req.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, trace.Wrap(err)
}
log.WithFields(logrus.Fields{
"not_after": req.NotAfter,
"dns_names": req.DNSNames,
"common_name": req.Subject.CommonName,
"org": req.Subject.Organization,
"org_unit": req.Subject.OrganizationalUnit,
"locality": req.Subject.Locality,
}).Infof("Generating TLS certificate %v.", req)
template := &x509.Certificate{
SerialNumber: serialNumber,
Subject: req.Subject,
// NotBefore is one minute in the past to prevent "Not yet valid" errors on
// time skewed clusters.
NotBefore: req.Clock.Now().UTC().Add(-1 * time.Minute),
NotAfter: req.NotAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
// BasicConstraintsValid is true to not allow any intermediate certs.
BasicConstraintsValid: true,
IsCA: false,
}
// sort out principals into DNS names and IP addresses
for i := range req.DNSNames {
if ip := net.ParseIP(req.DNSNames[i]); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, req.DNSNames[i])
}
}
certBytes, err := x509.CreateCertificate(rand.Reader, template, ca.Cert, req.PublicKey, ca.Signer)
if err != nil {
return nil, trace.Wrap(err)
}
return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes}), nil
} | go | func (ca *CertAuthority) GenerateCertificate(req CertificateRequest) ([]byte, error) {
if err := req.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, trace.Wrap(err)
}
log.WithFields(logrus.Fields{
"not_after": req.NotAfter,
"dns_names": req.DNSNames,
"common_name": req.Subject.CommonName,
"org": req.Subject.Organization,
"org_unit": req.Subject.OrganizationalUnit,
"locality": req.Subject.Locality,
}).Infof("Generating TLS certificate %v.", req)
template := &x509.Certificate{
SerialNumber: serialNumber,
Subject: req.Subject,
// NotBefore is one minute in the past to prevent "Not yet valid" errors on
// time skewed clusters.
NotBefore: req.Clock.Now().UTC().Add(-1 * time.Minute),
NotAfter: req.NotAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
// BasicConstraintsValid is true to not allow any intermediate certs.
BasicConstraintsValid: true,
IsCA: false,
}
// sort out principals into DNS names and IP addresses
for i := range req.DNSNames {
if ip := net.ParseIP(req.DNSNames[i]); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, req.DNSNames[i])
}
}
certBytes, err := x509.CreateCertificate(rand.Reader, template, ca.Cert, req.PublicKey, ca.Signer)
if err != nil {
return nil, trace.Wrap(err)
}
return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes}), nil
} | [
"func",
"(",
"ca",
"*",
"CertAuthority",
")",
"GenerateCertificate",
"(",
"req",
"CertificateRequest",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"if",
"err",
":=",
"req",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"serialNumberLimit",
":=",
"new",
"(",
"big",
".",
"Int",
")",
".",
"Lsh",
"(",
"big",
".",
"NewInt",
"(",
"1",
")",
",",
"128",
")",
"\n",
"serialNumber",
",",
"err",
":=",
"rand",
".",
"Int",
"(",
"rand",
".",
"Reader",
",",
"serialNumberLimit",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"log",
".",
"WithFields",
"(",
"logrus",
".",
"Fields",
"{",
"\"",
"\"",
":",
"req",
".",
"NotAfter",
",",
"\"",
"\"",
":",
"req",
".",
"DNSNames",
",",
"\"",
"\"",
":",
"req",
".",
"Subject",
".",
"CommonName",
",",
"\"",
"\"",
":",
"req",
".",
"Subject",
".",
"Organization",
",",
"\"",
"\"",
":",
"req",
".",
"Subject",
".",
"OrganizationalUnit",
",",
"\"",
"\"",
":",
"req",
".",
"Subject",
".",
"Locality",
",",
"}",
")",
".",
"Infof",
"(",
"\"",
"\"",
",",
"req",
")",
"\n\n",
"template",
":=",
"&",
"x509",
".",
"Certificate",
"{",
"SerialNumber",
":",
"serialNumber",
",",
"Subject",
":",
"req",
".",
"Subject",
",",
"// NotBefore is one minute in the past to prevent \"Not yet valid\" errors on",
"// time skewed clusters.",
"NotBefore",
":",
"req",
".",
"Clock",
".",
"Now",
"(",
")",
".",
"UTC",
"(",
")",
".",
"Add",
"(",
"-",
"1",
"*",
"time",
".",
"Minute",
")",
",",
"NotAfter",
":",
"req",
".",
"NotAfter",
",",
"KeyUsage",
":",
"x509",
".",
"KeyUsageKeyEncipherment",
"|",
"x509",
".",
"KeyUsageDigitalSignature",
",",
"ExtKeyUsage",
":",
"[",
"]",
"x509",
".",
"ExtKeyUsage",
"{",
"x509",
".",
"ExtKeyUsageServerAuth",
",",
"x509",
".",
"ExtKeyUsageClientAuth",
"}",
",",
"// BasicConstraintsValid is true to not allow any intermediate certs.",
"BasicConstraintsValid",
":",
"true",
",",
"IsCA",
":",
"false",
",",
"}",
"\n\n",
"// sort out principals into DNS names and IP addresses",
"for",
"i",
":=",
"range",
"req",
".",
"DNSNames",
"{",
"if",
"ip",
":=",
"net",
".",
"ParseIP",
"(",
"req",
".",
"DNSNames",
"[",
"i",
"]",
")",
";",
"ip",
"!=",
"nil",
"{",
"template",
".",
"IPAddresses",
"=",
"append",
"(",
"template",
".",
"IPAddresses",
",",
"ip",
")",
"\n",
"}",
"else",
"{",
"template",
".",
"DNSNames",
"=",
"append",
"(",
"template",
".",
"DNSNames",
",",
"req",
".",
"DNSNames",
"[",
"i",
"]",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"certBytes",
",",
"err",
":=",
"x509",
".",
"CreateCertificate",
"(",
"rand",
".",
"Reader",
",",
"template",
",",
"ca",
".",
"Cert",
",",
"req",
".",
"PublicKey",
",",
"ca",
".",
"Signer",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"pem",
".",
"EncodeToMemory",
"(",
"&",
"pem",
".",
"Block",
"{",
"Type",
":",
"\"",
"\"",
",",
"Bytes",
":",
"certBytes",
"}",
")",
",",
"nil",
"\n",
"}"
] | // GenerateCertificate generates certificate from request | [
"GenerateCertificate",
"generates",
"certificate",
"from",
"request"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/tlsca/ca.go#L152-L200 | train |
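Editor's note: GenerateCertificate above is essentially a wrapper around x509.CreateCertificate with a back-dated NotBefore and a SAN list split into DNS names and IP addresses. The self-contained Go sketch below walks through that flow with a throwaway ECDSA CA; the names, lifetimes, and key type are illustrative, not Teleport's defaults.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// Throwaway CA key and self-signed CA certificate.
	caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example-ca"},
		NotBefore:             time.Now().Add(-time.Minute),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	caDER, err := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	if err != nil {
		panic(err)
	}
	caCert, err := x509.ParseCertificate(caDER)
	if err != nil {
		panic(err)
	}

	// Leaf key and template; principals are split into DNS names and IPs.
	leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	leafTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "node.example.com"},
		// Back-date NotBefore slightly to tolerate clock skew between hosts.
		NotBefore:             time.Now().Add(-time.Minute),
		NotAfter:              time.Now().Add(12 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		BasicConstraintsValid: true,
	}
	for _, name := range []string{"node.example.com", "10.0.0.7"} {
		if ip := net.ParseIP(name); ip != nil {
			leafTmpl.IPAddresses = append(leafTmpl.IPAddresses, ip)
		} else {
			leafTmpl.DNSNames = append(leafTmpl.DNSNames, name)
		}
	}

	leafDER, err := x509.CreateCertificate(rand.Reader, leafTmpl, caCert, &leafKey.PublicKey, caKey)
	if err != nil {
		panic(err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
	fmt.Printf("issued %d bytes of PEM\n", len(certPEM))
}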
gravitational/teleport | lib/srv/termhandlers.go | HandleExec | func (t *TermHandlers) HandleExec(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
execRequest, err := parseExecRequest(req, ctx)
if err != nil {
return trace.Wrap(err)
}
// a terminal has been previously allocate for this command.
// run this inside an interactive session
if ctx.GetTerm() != nil {
return t.SessionRegistry.OpenSession(ch, req, ctx)
}
// If this code is running on a Teleport node and PAM is enabled, then open a
// PAM context.
var pamContext *pam.PAM
if ctx.srv.Component() == teleport.ComponentNode {
conf, err := t.SessionRegistry.srv.GetPAM()
if err != nil {
return trace.Wrap(err)
}
if conf.Enabled == true {
// Note, stdout/stderr is discarded here, otherwise MOTD would be printed to
// the users screen during exec requests.
pamContext, err = pam.Open(&pam.Config{
ServiceName: conf.ServiceName,
Username: ctx.Identity.Login,
Stdin: ch,
Stderr: ioutil.Discard,
Stdout: ioutil.Discard,
})
if err != nil {
return trace.Wrap(err)
}
ctx.Debugf("Opening PAM context for exec request %q.", execRequest.GetCommand())
}
}
// otherwise, regular execution
result, err := execRequest.Start(ch)
if err != nil {
return trace.Wrap(err)
}
// if the program failed to start, we should send that result back
if result != nil {
ctx.Debugf("Exec request (%v) result: %v", execRequest, result)
ctx.SendExecResult(*result)
}
// in case if result is nil and no error, this means that program is
// running in the background
go func() {
result, err = execRequest.Wait()
if err != nil {
ctx.Errorf("Exec request (%v) wait failed: %v", execRequest, err)
}
if result != nil {
ctx.SendExecResult(*result)
}
// If this code is running on a Teleport node and PAM is enabled, close the context.
if ctx.srv.Component() == teleport.ComponentNode {
conf, err := t.SessionRegistry.srv.GetPAM()
if err != nil {
ctx.Errorf("Unable to get PAM configuration from server: %v", err)
return
}
if conf.Enabled == true {
err = pamContext.Close()
if err != nil {
ctx.Errorf("Unable to close PAM context for exec request: %q: %v", execRequest.GetCommand(), err)
return
}
ctx.Debugf("Closing PAM context for exec request: %q.", execRequest.GetCommand())
}
}
}()
return nil
} | go | func (t *TermHandlers) HandleExec(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
execRequest, err := parseExecRequest(req, ctx)
if err != nil {
return trace.Wrap(err)
}
// a terminal has been previously allocate for this command.
// run this inside an interactive session
if ctx.GetTerm() != nil {
return t.SessionRegistry.OpenSession(ch, req, ctx)
}
// If this code is running on a Teleport node and PAM is enabled, then open a
// PAM context.
var pamContext *pam.PAM
if ctx.srv.Component() == teleport.ComponentNode {
conf, err := t.SessionRegistry.srv.GetPAM()
if err != nil {
return trace.Wrap(err)
}
if conf.Enabled == true {
// Note, stdout/stderr is discarded here, otherwise MOTD would be printed to
// the users screen during exec requests.
pamContext, err = pam.Open(&pam.Config{
ServiceName: conf.ServiceName,
Username: ctx.Identity.Login,
Stdin: ch,
Stderr: ioutil.Discard,
Stdout: ioutil.Discard,
})
if err != nil {
return trace.Wrap(err)
}
ctx.Debugf("Opening PAM context for exec request %q.", execRequest.GetCommand())
}
}
// otherwise, regular execution
result, err := execRequest.Start(ch)
if err != nil {
return trace.Wrap(err)
}
// if the program failed to start, we should send that result back
if result != nil {
ctx.Debugf("Exec request (%v) result: %v", execRequest, result)
ctx.SendExecResult(*result)
}
// in case if result is nil and no error, this means that program is
// running in the background
go func() {
result, err = execRequest.Wait()
if err != nil {
ctx.Errorf("Exec request (%v) wait failed: %v", execRequest, err)
}
if result != nil {
ctx.SendExecResult(*result)
}
// If this code is running on a Teleport node and PAM is enabled, close the context.
if ctx.srv.Component() == teleport.ComponentNode {
conf, err := t.SessionRegistry.srv.GetPAM()
if err != nil {
ctx.Errorf("Unable to get PAM configuration from server: %v", err)
return
}
if conf.Enabled == true {
err = pamContext.Close()
if err != nil {
ctx.Errorf("Unable to close PAM context for exec request: %q: %v", execRequest.GetCommand(), err)
return
}
ctx.Debugf("Closing PAM context for exec request: %q.", execRequest.GetCommand())
}
}
}()
return nil
} | [
"func",
"(",
"t",
"*",
"TermHandlers",
")",
"HandleExec",
"(",
"ch",
"ssh",
".",
"Channel",
",",
"req",
"*",
"ssh",
".",
"Request",
",",
"ctx",
"*",
"ServerContext",
")",
"error",
"{",
"execRequest",
",",
"err",
":=",
"parseExecRequest",
"(",
"req",
",",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// a terminal has been previously allocate for this command.",
"// run this inside an interactive session",
"if",
"ctx",
".",
"GetTerm",
"(",
")",
"!=",
"nil",
"{",
"return",
"t",
".",
"SessionRegistry",
".",
"OpenSession",
"(",
"ch",
",",
"req",
",",
"ctx",
")",
"\n",
"}",
"\n\n",
"// If this code is running on a Teleport node and PAM is enabled, then open a",
"// PAM context.",
"var",
"pamContext",
"*",
"pam",
".",
"PAM",
"\n",
"if",
"ctx",
".",
"srv",
".",
"Component",
"(",
")",
"==",
"teleport",
".",
"ComponentNode",
"{",
"conf",
",",
"err",
":=",
"t",
".",
"SessionRegistry",
".",
"srv",
".",
"GetPAM",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"conf",
".",
"Enabled",
"==",
"true",
"{",
"// Note, stdout/stderr is discarded here, otherwise MOTD would be printed to",
"// the users screen during exec requests.",
"pamContext",
",",
"err",
"=",
"pam",
".",
"Open",
"(",
"&",
"pam",
".",
"Config",
"{",
"ServiceName",
":",
"conf",
".",
"ServiceName",
",",
"Username",
":",
"ctx",
".",
"Identity",
".",
"Login",
",",
"Stdin",
":",
"ch",
",",
"Stderr",
":",
"ioutil",
".",
"Discard",
",",
"Stdout",
":",
"ioutil",
".",
"Discard",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"ctx",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"execRequest",
".",
"GetCommand",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"// otherwise, regular execution",
"result",
",",
"err",
":=",
"execRequest",
".",
"Start",
"(",
"ch",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"// if the program failed to start, we should send that result back",
"if",
"result",
"!=",
"nil",
"{",
"ctx",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"execRequest",
",",
"result",
")",
"\n",
"ctx",
".",
"SendExecResult",
"(",
"*",
"result",
")",
"\n",
"}",
"\n\n",
"// in case if result is nil and no error, this means that program is",
"// running in the background",
"go",
"func",
"(",
")",
"{",
"result",
",",
"err",
"=",
"execRequest",
".",
"Wait",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"ctx",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"execRequest",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"result",
"!=",
"nil",
"{",
"ctx",
".",
"SendExecResult",
"(",
"*",
"result",
")",
"\n",
"}",
"\n\n",
"// If this code is running on a Teleport node and PAM is enabled, close the context.",
"if",
"ctx",
".",
"srv",
".",
"Component",
"(",
")",
"==",
"teleport",
".",
"ComponentNode",
"{",
"conf",
",",
"err",
":=",
"t",
".",
"SessionRegistry",
".",
"srv",
".",
"GetPAM",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"ctx",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"conf",
".",
"Enabled",
"==",
"true",
"{",
"err",
"=",
"pamContext",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"ctx",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"execRequest",
".",
"GetCommand",
"(",
")",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"ctx",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"execRequest",
".",
"GetCommand",
"(",
")",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // HandleExec handles requests of type "exec" which can execute with or
// without a TTY. Result of execution is propagated back on the ExecResult
// channel of the context. | [
"HandleExec",
"handles",
"requests",
"of",
"type",
"exec",
"which",
"can",
"execute",
"with",
"or",
"without",
"a",
"TTY",
".",
"Result",
"of",
"execution",
"is",
"propagated",
"back",
"on",
"the",
"ExecResult",
"channel",
"of",
"the",
"context",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/srv/termhandlers.go#L39-L119 | train |
gravitational/teleport | lib/srv/termhandlers.go | HandlePTYReq | func (t *TermHandlers) HandlePTYReq(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
// parse and extract the requested window size of the pty
ptyRequest, err := parsePTYReq(req)
if err != nil {
return trace.Wrap(err)
}
termModes, err := ptyRequest.TerminalModes()
if err != nil {
return trace.Wrap(err)
}
params, err := rsession.NewTerminalParamsFromUint32(ptyRequest.W, ptyRequest.H)
if err != nil {
return trace.Wrap(err)
}
ctx.Debugf("Requested terminal %q of size %v", ptyRequest.Env, *params)
// get an existing terminal or create a new one
term := ctx.GetTerm()
if term == nil {
// a regular or forwarding terminal will be allocated
term, err = NewTerminal(ctx)
if err != nil {
return trace.Wrap(err)
}
ctx.SetTerm(term)
}
term.SetWinSize(*params)
term.SetTermType(ptyRequest.Env)
term.SetTerminalModes(termModes)
// update the session
if err := t.SessionRegistry.NotifyWinChange(*params, ctx); err != nil {
ctx.Errorf("Unable to update session: %v", err)
}
return nil
} | go | func (t *TermHandlers) HandlePTYReq(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
// parse and extract the requested window size of the pty
ptyRequest, err := parsePTYReq(req)
if err != nil {
return trace.Wrap(err)
}
termModes, err := ptyRequest.TerminalModes()
if err != nil {
return trace.Wrap(err)
}
params, err := rsession.NewTerminalParamsFromUint32(ptyRequest.W, ptyRequest.H)
if err != nil {
return trace.Wrap(err)
}
ctx.Debugf("Requested terminal %q of size %v", ptyRequest.Env, *params)
// get an existing terminal or create a new one
term := ctx.GetTerm()
if term == nil {
// a regular or forwarding terminal will be allocated
term, err = NewTerminal(ctx)
if err != nil {
return trace.Wrap(err)
}
ctx.SetTerm(term)
}
term.SetWinSize(*params)
term.SetTermType(ptyRequest.Env)
term.SetTerminalModes(termModes)
// update the session
if err := t.SessionRegistry.NotifyWinChange(*params, ctx); err != nil {
ctx.Errorf("Unable to update session: %v", err)
}
return nil
} | [
"func",
"(",
"t",
"*",
"TermHandlers",
")",
"HandlePTYReq",
"(",
"ch",
"ssh",
".",
"Channel",
",",
"req",
"*",
"ssh",
".",
"Request",
",",
"ctx",
"*",
"ServerContext",
")",
"error",
"{",
"// parse and extract the requested window size of the pty",
"ptyRequest",
",",
"err",
":=",
"parsePTYReq",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"termModes",
",",
"err",
":=",
"ptyRequest",
".",
"TerminalModes",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"params",
",",
"err",
":=",
"rsession",
".",
"NewTerminalParamsFromUint32",
"(",
"ptyRequest",
".",
"W",
",",
"ptyRequest",
".",
"H",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"ctx",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"ptyRequest",
".",
"Env",
",",
"*",
"params",
")",
"\n\n",
"// get an existing terminal or create a new one",
"term",
":=",
"ctx",
".",
"GetTerm",
"(",
")",
"\n",
"if",
"term",
"==",
"nil",
"{",
"// a regular or forwarding terminal will be allocated",
"term",
",",
"err",
"=",
"NewTerminal",
"(",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"ctx",
".",
"SetTerm",
"(",
"term",
")",
"\n",
"}",
"\n",
"term",
".",
"SetWinSize",
"(",
"*",
"params",
")",
"\n",
"term",
".",
"SetTermType",
"(",
"ptyRequest",
".",
"Env",
")",
"\n",
"term",
".",
"SetTerminalModes",
"(",
"termModes",
")",
"\n\n",
"// update the session",
"if",
"err",
":=",
"t",
".",
"SessionRegistry",
".",
"NotifyWinChange",
"(",
"*",
"params",
",",
"ctx",
")",
";",
"err",
"!=",
"nil",
"{",
"ctx",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // HandlePTYReq handles requests of type "pty-req" which allocate a TTY for
// "exec" or "shell" requests. The "pty-req" includes the size of the TTY as
// well as the terminal type requested. | [
"HandlePTYReq",
"handles",
"requests",
"of",
"type",
"pty",
"-",
"req",
"which",
"allocate",
"a",
"TTY",
"for",
"exec",
"or",
"shell",
"requests",
".",
"The",
"pty",
"-",
"req",
"includes",
"the",
"size",
"of",
"the",
"TTY",
"as",
"well",
"as",
"the",
"terminal",
"type",
"requested",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/srv/termhandlers.go#L124-L162 | train |
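Editor's note: parsePTYReq and parseWinChange referenced above are Teleport helpers, but the wire formats they decode are the standard RFC 4254 "pty-req" and "window-change" payloads, which golang.org/x/crypto/ssh can unpack directly. The Go sketch below shows that decoding; the struct layouts follow the RFC, and the round trip in main is synthetic rather than taken from a live SSH session.

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// ptyReqParams mirrors the RFC 4254 section 6.2 "pty-req" payload.
type ptyReqParams struct {
	Env      string // TERM environment variable value, e.g. "xterm"
	W, H     uint32 // terminal width and height in characters
	Wpx, Hpx uint32 // terminal width and height in pixels
	Modes    string // encoded terminal modes
}

// winChangeParams mirrors the RFC 4254 section 6.7 "window-change" payload.
type winChangeParams struct {
	W, H     uint32
	Wpx, Hpx uint32
}

// parsePTYReq decodes a "pty-req" request payload.
func parsePTYReq(req *ssh.Request) (*ptyReqParams, error) {
	var p ptyReqParams
	if err := ssh.Unmarshal(req.Payload, &p); err != nil {
		return nil, err
	}
	return &p, nil
}

// parseWinChange decodes a "window-change" request payload.
func parseWinChange(req *ssh.Request) (*winChangeParams, error) {
	var p winChangeParams
	if err := ssh.Unmarshal(req.Payload, &p); err != nil {
		return nil, err
	}
	return &p, nil
}

func main() {
	// Build a synthetic request and parse it back.
	payload := ssh.Marshal(ptyReqParams{Env: "xterm", W: 120, H: 40})
	req := &ssh.Request{Type: "pty-req", WantReply: true, Payload: payload}
	p, err := parsePTYReq(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("terminal %q, %dx%d\n", p.Env, p.W, p.H)
	_ = parseWinChange // window-change payloads decode the same way
}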
gravitational/teleport | lib/srv/termhandlers.go | HandleShell | func (t *TermHandlers) HandleShell(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
var err error
// creating an empty exec request implies a interactive shell was requested
ctx.ExecRequest, err = NewExecRequest(ctx, "")
if err != nil {
return trace.Wrap(err)
}
if err := t.SessionRegistry.OpenSession(ch, req, ctx); err != nil {
return trace.Wrap(err)
}
return nil
} | go | func (t *TermHandlers) HandleShell(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
var err error
// creating an empty exec request implies a interactive shell was requested
ctx.ExecRequest, err = NewExecRequest(ctx, "")
if err != nil {
return trace.Wrap(err)
}
if err := t.SessionRegistry.OpenSession(ch, req, ctx); err != nil {
return trace.Wrap(err)
}
return nil
} | [
"func",
"(",
"t",
"*",
"TermHandlers",
")",
"HandleShell",
"(",
"ch",
"ssh",
".",
"Channel",
",",
"req",
"*",
"ssh",
".",
"Request",
",",
"ctx",
"*",
"ServerContext",
")",
"error",
"{",
"var",
"err",
"error",
"\n\n",
"// creating an empty exec request implies a interactive shell was requested",
"ctx",
".",
"ExecRequest",
",",
"err",
"=",
"NewExecRequest",
"(",
"ctx",
",",
"\"",
"\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"t",
".",
"SessionRegistry",
".",
"OpenSession",
"(",
"ch",
",",
"req",
",",
"ctx",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // HandleShell handles requests of type "shell" which request an interactive
// shell be created within a TTY. | [
"HandleShell",
"handles",
"requests",
"of",
"type",
"shell",
"which",
"request",
"a",
"interactive",
"shell",
"be",
"created",
"within",
"a",
"TTY",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/srv/termhandlers.go#L166-L180 | train |
gravitational/teleport | lib/srv/termhandlers.go | HandleWinChange | func (t *TermHandlers) HandleWinChange(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
params, err := parseWinChange(req)
if err != nil {
return trace.Wrap(err)
}
// Update any other members in the party that the window size has changed
// and to update their terminal windows accordingly.
err = t.SessionRegistry.NotifyWinChange(*params, ctx)
if err != nil {
return trace.Wrap(err)
}
return nil
} | go | func (t *TermHandlers) HandleWinChange(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) error {
params, err := parseWinChange(req)
if err != nil {
return trace.Wrap(err)
}
// Update any other members in the party that the window size has changed
// and to update their terminal windows accordingly.
err = t.SessionRegistry.NotifyWinChange(*params, ctx)
if err != nil {
return trace.Wrap(err)
}
return nil
} | [
"func",
"(",
"t",
"*",
"TermHandlers",
")",
"HandleWinChange",
"(",
"ch",
"ssh",
".",
"Channel",
",",
"req",
"*",
"ssh",
".",
"Request",
",",
"ctx",
"*",
"ServerContext",
")",
"error",
"{",
"params",
",",
"err",
":=",
"parseWinChange",
"(",
"req",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// Update any other members in the party that the window size has changed",
"// and to update their terminal windows accordingly.",
"err",
"=",
"t",
".",
"SessionRegistry",
".",
"NotifyWinChange",
"(",
"*",
"params",
",",
"ctx",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // HandleWinChange handles requests of type "window-change" which update the
// size of the PTY running on the server and update any other members in the
// party. | [
"HandleWinChange",
"handles",
"requests",
"of",
"type",
"window",
"-",
"change",
"which",
"update",
"the",
"size",
"of",
"the",
"PTY",
"running",
"on",
"the",
"server",
"and",
"update",
"any",
"other",
"members",
"in",
"the",
"party",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/srv/termhandlers.go#L185-L199 | train |
gravitational/teleport | lib/reversetunnel/srv.go | NewServer | func NewServer(cfg Config) (Server, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
ctx, cancel := context.WithCancel(cfg.Context)
srv := &server{
Config: cfg,
localSites: []*localSite{},
remoteSites: []*remoteSite{},
localAuthClient: cfg.LocalAuthClient,
localAccessPoint: cfg.LocalAccessPoint,
newAccessPoint: cfg.NewCachingAccessPoint,
limiter: cfg.Limiter,
ctx: ctx,
cancel: cancel,
clusterPeers: make(map[string]*clusterPeers),
Entry: log.WithFields(log.Fields{
trace.Component: cfg.Component,
}),
}
for _, clusterInfo := range cfg.DirectClusters {
cluster, err := newlocalSite(srv, clusterInfo.Name, clusterInfo.Client)
if err != nil {
return nil, trace.Wrap(err)
}
srv.localSites = append(srv.localSites, cluster)
}
var err error
s, err := sshutils.NewServer(
teleport.ComponentReverseTunnelServer,
// TODO(klizhentas): improve interface, use struct instead of parameter list
// this address is not used
utils.NetAddr{Addr: "127.0.0.1:1", AddrNetwork: "tcp"},
srv,
cfg.HostSigners,
sshutils.AuthMethods{
PublicKey: srv.keyAuth,
},
sshutils.SetLimiter(cfg.Limiter),
sshutils.SetCiphers(cfg.Ciphers),
sshutils.SetKEXAlgorithms(cfg.KEXAlgorithms),
sshutils.SetMACAlgorithms(cfg.MACAlgorithms),
)
if err != nil {
return nil, err
}
srv.srv = s
go srv.periodicFunctions()
return srv, nil
} | go | func NewServer(cfg Config) (Server, error) {
if err := cfg.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
ctx, cancel := context.WithCancel(cfg.Context)
srv := &server{
Config: cfg,
localSites: []*localSite{},
remoteSites: []*remoteSite{},
localAuthClient: cfg.LocalAuthClient,
localAccessPoint: cfg.LocalAccessPoint,
newAccessPoint: cfg.NewCachingAccessPoint,
limiter: cfg.Limiter,
ctx: ctx,
cancel: cancel,
clusterPeers: make(map[string]*clusterPeers),
Entry: log.WithFields(log.Fields{
trace.Component: cfg.Component,
}),
}
for _, clusterInfo := range cfg.DirectClusters {
cluster, err := newlocalSite(srv, clusterInfo.Name, clusterInfo.Client)
if err != nil {
return nil, trace.Wrap(err)
}
srv.localSites = append(srv.localSites, cluster)
}
var err error
s, err := sshutils.NewServer(
teleport.ComponentReverseTunnelServer,
// TODO(klizhentas): improve interface, use struct instead of parameter list
// this address is not used
utils.NetAddr{Addr: "127.0.0.1:1", AddrNetwork: "tcp"},
srv,
cfg.HostSigners,
sshutils.AuthMethods{
PublicKey: srv.keyAuth,
},
sshutils.SetLimiter(cfg.Limiter),
sshutils.SetCiphers(cfg.Ciphers),
sshutils.SetKEXAlgorithms(cfg.KEXAlgorithms),
sshutils.SetMACAlgorithms(cfg.MACAlgorithms),
)
if err != nil {
return nil, err
}
srv.srv = s
go srv.periodicFunctions()
return srv, nil
} | [
"func",
"NewServer",
"(",
"cfg",
"Config",
")",
"(",
"Server",
",",
"error",
")",
"{",
"if",
"err",
":=",
"cfg",
".",
"CheckAndSetDefaults",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"cfg",
".",
"Context",
")",
"\n",
"srv",
":=",
"&",
"server",
"{",
"Config",
":",
"cfg",
",",
"localSites",
":",
"[",
"]",
"*",
"localSite",
"{",
"}",
",",
"remoteSites",
":",
"[",
"]",
"*",
"remoteSite",
"{",
"}",
",",
"localAuthClient",
":",
"cfg",
".",
"LocalAuthClient",
",",
"localAccessPoint",
":",
"cfg",
".",
"LocalAccessPoint",
",",
"newAccessPoint",
":",
"cfg",
".",
"NewCachingAccessPoint",
",",
"limiter",
":",
"cfg",
".",
"Limiter",
",",
"ctx",
":",
"ctx",
",",
"cancel",
":",
"cancel",
",",
"clusterPeers",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"clusterPeers",
")",
",",
"Entry",
":",
"log",
".",
"WithFields",
"(",
"log",
".",
"Fields",
"{",
"trace",
".",
"Component",
":",
"cfg",
".",
"Component",
",",
"}",
")",
",",
"}",
"\n\n",
"for",
"_",
",",
"clusterInfo",
":=",
"range",
"cfg",
".",
"DirectClusters",
"{",
"cluster",
",",
"err",
":=",
"newlocalSite",
"(",
"srv",
",",
"clusterInfo",
".",
"Name",
",",
"clusterInfo",
".",
"Client",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"srv",
".",
"localSites",
"=",
"append",
"(",
"srv",
".",
"localSites",
",",
"cluster",
")",
"\n",
"}",
"\n\n",
"var",
"err",
"error",
"\n",
"s",
",",
"err",
":=",
"sshutils",
".",
"NewServer",
"(",
"teleport",
".",
"ComponentReverseTunnelServer",
",",
"// TODO(klizhentas): improve interface, use struct instead of parameter list",
"// this address is not used",
"utils",
".",
"NetAddr",
"{",
"Addr",
":",
"\"",
"\"",
",",
"AddrNetwork",
":",
"\"",
"\"",
"}",
",",
"srv",
",",
"cfg",
".",
"HostSigners",
",",
"sshutils",
".",
"AuthMethods",
"{",
"PublicKey",
":",
"srv",
".",
"keyAuth",
",",
"}",
",",
"sshutils",
".",
"SetLimiter",
"(",
"cfg",
".",
"Limiter",
")",
",",
"sshutils",
".",
"SetCiphers",
"(",
"cfg",
".",
"Ciphers",
")",
",",
"sshutils",
".",
"SetKEXAlgorithms",
"(",
"cfg",
".",
"KEXAlgorithms",
")",
",",
"sshutils",
".",
"SetMACAlgorithms",
"(",
"cfg",
".",
"MACAlgorithms",
")",
",",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"srv",
".",
"srv",
"=",
"s",
"\n",
"go",
"srv",
".",
"periodicFunctions",
"(",
")",
"\n",
"return",
"srv",
",",
"nil",
"\n",
"}"
] | // NewServer creates and returns a reverse tunnel server which is fully
// initialized but hasn't been started yet | [
"NewServer",
"creates",
"and",
"returns",
"a",
"reverse",
"tunnel",
"server",
"which",
"is",
"fully",
"initialized",
"but",
"hasn",
"t",
"been",
"started",
"yet"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L216-L268 | train |
gravitational/teleport | lib/reversetunnel/srv.go | disconnectClusters | func (s *server) disconnectClusters() error {
connectedRemoteClusters := s.getRemoteClusters()
if len(connectedRemoteClusters) == 0 {
return nil
}
remoteClusters, err := s.localAuthClient.GetRemoteClusters()
if err != nil {
return trace.Wrap(err)
}
remoteMap := remoteClustersMap(remoteClusters)
for _, cluster := range connectedRemoteClusters {
if _, ok := remoteMap[cluster.GetName()]; !ok {
s.Infof("Remote cluster %q has been deleted. Disconnecting it from the proxy.", cluster.GetName())
s.RemoveSite(cluster.GetName())
err := cluster.Close()
if err != nil {
s.Debugf("Failure closing cluster %q: %v.", cluster.GetName(), err)
}
}
}
return nil
} | go | func (s *server) disconnectClusters() error {
connectedRemoteClusters := s.getRemoteClusters()
if len(connectedRemoteClusters) == 0 {
return nil
}
remoteClusters, err := s.localAuthClient.GetRemoteClusters()
if err != nil {
return trace.Wrap(err)
}
remoteMap := remoteClustersMap(remoteClusters)
for _, cluster := range connectedRemoteClusters {
if _, ok := remoteMap[cluster.GetName()]; !ok {
s.Infof("Remote cluster %q has been deleted. Disconnecting it from the proxy.", cluster.GetName())
s.RemoveSite(cluster.GetName())
err := cluster.Close()
if err != nil {
s.Debugf("Failure closing cluster %q: %v.", cluster.GetName(), err)
}
}
}
return nil
} | [
"func",
"(",
"s",
"*",
"server",
")",
"disconnectClusters",
"(",
")",
"error",
"{",
"connectedRemoteClusters",
":=",
"s",
".",
"getRemoteClusters",
"(",
")",
"\n",
"if",
"len",
"(",
"connectedRemoteClusters",
")",
"==",
"0",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"remoteClusters",
",",
"err",
":=",
"s",
".",
"localAuthClient",
".",
"GetRemoteClusters",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"remoteMap",
":=",
"remoteClustersMap",
"(",
"remoteClusters",
")",
"\n",
"for",
"_",
",",
"cluster",
":=",
"range",
"connectedRemoteClusters",
"{",
"if",
"_",
",",
"ok",
":=",
"remoteMap",
"[",
"cluster",
".",
"GetName",
"(",
")",
"]",
";",
"!",
"ok",
"{",
"s",
".",
"Infof",
"(",
"\"",
"\"",
",",
"cluster",
".",
"GetName",
"(",
")",
")",
"\n",
"s",
".",
"RemoveSite",
"(",
"cluster",
".",
"GetName",
"(",
")",
")",
"\n",
"err",
":=",
"cluster",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"cluster",
".",
"GetName",
"(",
")",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // disconnectClusters disconnects reverse tunnel connections from remote clusters
// that were deleted from the local cluster side and cleans up in-memory objects.
// In this case all local trust has been deleted, so all the tunnel connections have to be dropped. | [
"disconnectClusters",
"disconnects",
"reverse",
"tunnel",
"connections",
"from",
"remote",
"clusters",
"that",
"were",
"deleted",
"from",
"the",
"the",
"local",
"cluster",
"side",
"and",
"cleans",
"up",
"in",
"memory",
"objects",
".",
"In",
"this",
"case",
"all",
"local",
"trust",
"has",
"been",
"deleted",
"so",
"all",
"the",
"tunnel",
"connections",
"have",
"to",
"be",
"dropped",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L281-L302 | train |
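disconnectClusters amounts to a set difference between the currently connected clusters and the clusters the local auth server still knows about. A generic, runnable sketch of that pattern with illustrative names:

package main

import "fmt"

// clustersToDrop returns the connected clusters that no longer exist locally.
func clustersToDrop(connected, known []string) []string {
	keep := make(map[string]struct{}, len(known))
	for _, name := range known {
		keep[name] = struct{}{}
	}
	var drop []string
	for _, name := range connected {
		if _, ok := keep[name]; !ok {
			// The cluster was deleted locally, so its tunnel should be closed.
			drop = append(drop, name)
		}
	}
	return drop
}

func main() {
	fmt.Println(clustersToDrop([]string{"east", "west"}, []string{"east"})) // [west]
}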
gravitational/teleport | lib/reversetunnel/srv.go | isHostAuthority | func (s *server) isHostAuthority(auth ssh.PublicKey, address string) bool {
keys, err := s.getTrustedCAKeys(services.HostCA)
if err != nil {
s.Errorf("failed to retrieve trusted keys, err: %v", err)
return false
}
for _, k := range keys {
if sshutils.KeysEqual(k, auth) {
return true
}
}
return false
} | go | func (s *server) isHostAuthority(auth ssh.PublicKey, address string) bool {
keys, err := s.getTrustedCAKeys(services.HostCA)
if err != nil {
s.Errorf("failed to retrieve trusted keys, err: %v", err)
return false
}
for _, k := range keys {
if sshutils.KeysEqual(k, auth) {
return true
}
}
return false
} | [
"func",
"(",
"s",
"*",
"server",
")",
"isHostAuthority",
"(",
"auth",
"ssh",
".",
"PublicKey",
",",
"address",
"string",
")",
"bool",
"{",
"keys",
",",
"err",
":=",
"s",
".",
"getTrustedCAKeys",
"(",
"services",
".",
"HostCA",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"for",
"_",
",",
"k",
":=",
"range",
"keys",
"{",
"if",
"sshutils",
".",
"KeysEqual",
"(",
"k",
",",
"auth",
")",
"{",
"return",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"false",
"\n",
"}"
] | // isHostAuthority is called when checking the client key, to see if the signing
// key is the real host CA authority key. | [
"isHostAuthority",
"is",
"called",
"during",
"checking",
"the",
"client",
"key",
"to",
"see",
"if",
"the",
"signing",
"key",
"is",
"the",
"real",
"host",
"CA",
"authority",
"key",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L618-L630 | train |
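The trust decision above rests on sshutils.KeysEqual. A self-contained sketch using the public golang.org/x/crypto/ssh package; keysEqual below is an assumed helper that compares wire-format encodings, which is what such helpers typically do:

package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// keysEqual compares the SSH wire-format encodings of two public keys.
func keysEqual(a, b ssh.PublicKey) bool {
	return bytes.Equal(a.Marshal(), b.Marshal())
}

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(keysEqual(sshPub, sshPub)) // true
}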
gravitational/teleport | lib/reversetunnel/srv.go | isUserAuthority | func (s *server) isUserAuthority(auth ssh.PublicKey) bool {
keys, err := s.getTrustedCAKeys(services.UserCA)
if err != nil {
s.Errorf("failed to retrieve trusted keys, err: %v", err)
return false
}
for _, k := range keys {
if sshutils.KeysEqual(k, auth) {
return true
}
}
return false
} | go | func (s *server) isUserAuthority(auth ssh.PublicKey) bool {
keys, err := s.getTrustedCAKeys(services.UserCA)
if err != nil {
s.Errorf("failed to retrieve trusted keys, err: %v", err)
return false
}
for _, k := range keys {
if sshutils.KeysEqual(k, auth) {
return true
}
}
return false
} | [
"func",
"(",
"s",
"*",
"server",
")",
"isUserAuthority",
"(",
"auth",
"ssh",
".",
"PublicKey",
")",
"bool",
"{",
"keys",
",",
"err",
":=",
"s",
".",
"getTrustedCAKeys",
"(",
"services",
".",
"UserCA",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"for",
"_",
",",
"k",
":=",
"range",
"keys",
"{",
"if",
"sshutils",
".",
"KeysEqual",
"(",
"k",
",",
"auth",
")",
"{",
"return",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"false",
"\n",
"}"
] | // isUserAuthority is called when checking the client key, to see if the signing
// key is the real user CA authority key. | [
"isUserAuthority",
"is",
"called",
"during",
"checking",
"the",
"client",
"key",
"to",
"see",
"if",
"the",
"signing",
"key",
"is",
"the",
"real",
"user",
"CA",
"authority",
"key",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L634-L646 | train |
gravitational/teleport | lib/reversetunnel/srv.go | checkHostCert | func (s *server) checkHostCert(logger *log.Entry, user string, clusterName string, cert *ssh.Certificate) error {
if cert.CertType != ssh.HostCert {
return trace.BadParameter("expected host cert, got wrong cert type: %d", cert.CertType)
}
// fetch keys of the certificate authority to check
// if there is a match
keys, err := s.getTrustedCAKeysByID(services.CertAuthID{
Type: services.HostCA,
DomainName: clusterName,
})
if err != nil {
return trace.Wrap(err)
}
// match key of the certificate authority with the signature key
var match bool
for _, k := range keys {
if sshutils.KeysEqual(k, cert.SignatureKey) {
match = true
break
}
}
if !match {
return trace.NotFound("cluster %v has no matching CA keys", clusterName)
}
checker := utils.CertChecker{}
if err := checker.CheckCert(user, cert); err != nil {
return trace.BadParameter(err.Error())
}
return nil
} | go | func (s *server) checkHostCert(logger *log.Entry, user string, clusterName string, cert *ssh.Certificate) error {
if cert.CertType != ssh.HostCert {
return trace.BadParameter("expected host cert, got wrong cert type: %d", cert.CertType)
}
// fetch keys of the certificate authority to check
// if there is a match
keys, err := s.getTrustedCAKeysByID(services.CertAuthID{
Type: services.HostCA,
DomainName: clusterName,
})
if err != nil {
return trace.Wrap(err)
}
// match key of the certificate authority with the signature key
var match bool
for _, k := range keys {
if sshutils.KeysEqual(k, cert.SignatureKey) {
match = true
break
}
}
if !match {
return trace.NotFound("cluster %v has no matching CA keys", clusterName)
}
checker := utils.CertChecker{}
if err := checker.CheckCert(user, cert); err != nil {
return trace.BadParameter(err.Error())
}
return nil
} | [
"func",
"(",
"s",
"*",
"server",
")",
"checkHostCert",
"(",
"logger",
"*",
"log",
".",
"Entry",
",",
"user",
"string",
",",
"clusterName",
"string",
",",
"cert",
"*",
"ssh",
".",
"Certificate",
")",
"error",
"{",
"if",
"cert",
".",
"CertType",
"!=",
"ssh",
".",
"HostCert",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"\"",
"\"",
",",
"cert",
".",
"CertType",
")",
"\n",
"}",
"\n\n",
"// fetch keys of the certificate authority to check",
"// if there is a match",
"keys",
",",
"err",
":=",
"s",
".",
"getTrustedCAKeysByID",
"(",
"services",
".",
"CertAuthID",
"{",
"Type",
":",
"services",
".",
"HostCA",
",",
"DomainName",
":",
"clusterName",
",",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"// match key of the certificate authority with the signature key",
"var",
"match",
"bool",
"\n",
"for",
"_",
",",
"k",
":=",
"range",
"keys",
"{",
"if",
"sshutils",
".",
"KeysEqual",
"(",
"k",
",",
"cert",
".",
"SignatureKey",
")",
"{",
"match",
"=",
"true",
"\n",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"!",
"match",
"{",
"return",
"trace",
".",
"NotFound",
"(",
"\"",
"\"",
",",
"clusterName",
")",
"\n",
"}",
"\n\n",
"checker",
":=",
"utils",
".",
"CertChecker",
"{",
"}",
"\n",
"if",
"err",
":=",
"checker",
".",
"CheckCert",
"(",
"user",
",",
"cert",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"trace",
".",
"BadParameter",
"(",
"err",
".",
"Error",
"(",
")",
")",
"\n",
"}",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // checkHostCert verifies that the host certificate is signed
// by a recognized certificate authority | [
"checkHostCert",
"verifies",
"that",
"host",
"certificate",
"is",
"signed",
"by",
"the",
"recognized",
"certificate",
"authority"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L718-L751 | train |
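checkHostCert relies on teleport's utils.CertChecker for the final validation step. A self-contained sketch of the same checks using the public golang.org/x/crypto/ssh CertChecker; the keys are generated on the fly and all names are illustrative:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func newSigner() ssh.Signer {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(priv)
	if err != nil {
		panic(err)
	}
	return signer
}

func main() {
	caSigner := newSigner()   // stands in for the cluster's host CA
	hostSigner := newSigner() // the host's own key

	cert := &ssh.Certificate{
		Key:             hostSigner.PublicKey(),
		CertType:        ssh.HostCert,
		ValidPrincipals: []string{"node.example.com"},
		ValidBefore:     ssh.CertTimeInfinity,
	}
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		panic(err)
	}

	checker := ssh.CertChecker{
		// IsHostAuthority plays the same role as isHostAuthority above when the
		// checker is used as a HostKeyCallback: trust only the known CA key.
		IsHostAuthority: func(auth ssh.PublicKey, addr string) bool {
			return string(auth.Marshal()) == string(caSigner.PublicKey().Marshal())
		},
	}
	// CheckCert validates the principal and the validity window, similar to the
	// final step of checkHostCert.
	fmt.Println(checker.CheckCert("node.example.com", cert)) // <nil>
}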
gravitational/teleport | lib/reversetunnel/srv.go | GetSite | func (s *server) GetSite(name string) (RemoteSite, error) {
s.RLock()
defer s.RUnlock()
for i := range s.remoteSites {
if s.remoteSites[i].GetName() == name {
return s.remoteSites[i], nil
}
}
for i := range s.localSites {
if s.localSites[i].GetName() == name {
return s.localSites[i], nil
}
}
for i := range s.clusterPeers {
if s.clusterPeers[i].GetName() == name {
return s.clusterPeers[i], nil
}
}
return nil, trace.NotFound("cluster %q is not found", name)
} | go | func (s *server) GetSite(name string) (RemoteSite, error) {
s.RLock()
defer s.RUnlock()
for i := range s.remoteSites {
if s.remoteSites[i].GetName() == name {
return s.remoteSites[i], nil
}
}
for i := range s.localSites {
if s.localSites[i].GetName() == name {
return s.localSites[i], nil
}
}
for i := range s.clusterPeers {
if s.clusterPeers[i].GetName() == name {
return s.clusterPeers[i], nil
}
}
return nil, trace.NotFound("cluster %q is not found", name)
} | [
"func",
"(",
"s",
"*",
"server",
")",
"GetSite",
"(",
"name",
"string",
")",
"(",
"RemoteSite",
",",
"error",
")",
"{",
"s",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"RUnlock",
"(",
")",
"\n",
"for",
"i",
":=",
"range",
"s",
".",
"remoteSites",
"{",
"if",
"s",
".",
"remoteSites",
"[",
"i",
"]",
".",
"GetName",
"(",
")",
"==",
"name",
"{",
"return",
"s",
".",
"remoteSites",
"[",
"i",
"]",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"i",
":=",
"range",
"s",
".",
"localSites",
"{",
"if",
"s",
".",
"localSites",
"[",
"i",
"]",
".",
"GetName",
"(",
")",
"==",
"name",
"{",
"return",
"s",
".",
"localSites",
"[",
"i",
"]",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"i",
":=",
"range",
"s",
".",
"clusterPeers",
"{",
"if",
"s",
".",
"clusterPeers",
"[",
"i",
"]",
".",
"GetName",
"(",
")",
"==",
"name",
"{",
"return",
"s",
".",
"clusterPeers",
"[",
"i",
"]",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
",",
"trace",
".",
"NotFound",
"(",
"\"",
"\"",
",",
"name",
")",
"\n",
"}"
] | // GetSite returns a RemoteSite. The first attempt is to find and return a
// remote site and that is what is returned if a remote agent has
// connected to this proxy. Next we loop over local sites and try to
// return a local site. If that fails, we return a cluster peer. This happens
// when you hit a proxy that has never had an agent connect to it. If you end up
// with a cluster peer your best bet is to wait until the agent has discovered
// all proxies behind the load balancer. Note, the cluster peer is a
// services.TunnelConnection that was created by another proxy. | [
"GetSite",
"returns",
"a",
"RemoteSite",
".",
"The",
"first",
"attempt",
"is",
"to",
"find",
"and",
"return",
"a",
"remote",
"site",
"and",
"that",
"is",
"what",
"is",
"returned",
"if",
"a",
"remote",
"remote",
"agent",
"has",
"connected",
"to",
"this",
"proxy",
".",
"Next",
"we",
"loop",
"over",
"local",
"sites",
"and",
"try",
"and",
"try",
"and",
"return",
"a",
"local",
"site",
".",
"If",
"that",
"fails",
"we",
"return",
"a",
"cluster",
"peer",
".",
"This",
"happens",
"when",
"you",
"hit",
"proxy",
"that",
"has",
"never",
"had",
"an",
"agent",
"connect",
"to",
"it",
".",
"If",
"you",
"end",
"up",
"with",
"a",
"cluster",
"peer",
"your",
"best",
"bet",
"is",
"to",
"wait",
"until",
"the",
"agent",
"has",
"discovered",
"all",
"proxies",
"behind",
"a",
"the",
"load",
"balancer",
".",
"Note",
"the",
"cluster",
"peer",
"is",
"a",
"services",
".",
"TunnelConnection",
"that",
"was",
"created",
"by",
"another",
"proxy",
"."
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L855-L874 | train |
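The precedence described in the docstring (remote sites, then local sites, then cluster peers) is a sequential scan over three slices. A minimal generic sketch with illustrative types:

package main

import (
	"errors"
	"fmt"
)

type site struct{ name string }

// findSite mirrors GetSite's precedence: remote sites win over local sites,
// which win over cluster peers.
func findSite(name string, remote, local, peers []site) (site, error) {
	for _, group := range [][]site{remote, local, peers} {
		for _, s := range group {
			if s.name == name {
				return s, nil
			}
		}
	}
	return site{}, errors.New("cluster not found: " + name)
}

func main() {
	s, err := findSite("leaf", nil, []site{{name: "leaf"}}, nil)
	fmt.Println(s, err)
}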
gravitational/teleport | lib/reversetunnel/srv.go | newRemoteSite | func newRemoteSite(srv *server, domainName string) (*remoteSite, error) {
connInfo, err := services.NewTunnelConnection(
fmt.Sprintf("%v-%v", srv.ID, domainName),
services.TunnelConnectionSpecV2{
ClusterName: domainName,
ProxyName: srv.ID,
LastHeartbeat: time.Now().UTC(),
},
)
if err != nil {
return nil, trace.Wrap(err)
}
closeContext, cancel := context.WithCancel(srv.ctx)
remoteSite := &remoteSite{
srv: srv,
domainName: domainName,
connInfo: connInfo,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentReverseTunnelServer,
trace.ComponentFields: log.Fields{
"cluster": domainName,
},
}),
ctx: closeContext,
cancel: cancel,
clock: srv.Clock,
}
// configure access to the full Auth Server API and the cached subset for
// the local cluster within which reversetunnel.Server is running.
remoteSite.localClient = srv.localAuthClient
remoteSite.localAccessPoint = srv.localAccessPoint
clt, _, err := remoteSite.getRemoteClient()
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.remoteClient = clt
// configure access to the cached subset of the Auth Server API of the remote
// cluster this remote site provides access to.
accessPoint, err := srv.newAccessPoint(clt, []string{"reverse", domainName})
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.remoteAccessPoint = accessPoint
// instantiate a cache of host certificates for the forwarding server. the
// certificate cache is created in each site (instead of creating it in
// reversetunnel.server and passing it along) so that the host certificate
// is signed by the correct certificate authority.
certificateCache, err := NewHostCertificateCache(srv.Config.KeyGen, srv.localAuthClient)
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.certificateCache = certificateCache
go remoteSite.periodicUpdateCertAuthorities()
return remoteSite, nil
} | go | func newRemoteSite(srv *server, domainName string) (*remoteSite, error) {
connInfo, err := services.NewTunnelConnection(
fmt.Sprintf("%v-%v", srv.ID, domainName),
services.TunnelConnectionSpecV2{
ClusterName: domainName,
ProxyName: srv.ID,
LastHeartbeat: time.Now().UTC(),
},
)
if err != nil {
return nil, trace.Wrap(err)
}
closeContext, cancel := context.WithCancel(srv.ctx)
remoteSite := &remoteSite{
srv: srv,
domainName: domainName,
connInfo: connInfo,
Entry: log.WithFields(log.Fields{
trace.Component: teleport.ComponentReverseTunnelServer,
trace.ComponentFields: log.Fields{
"cluster": domainName,
},
}),
ctx: closeContext,
cancel: cancel,
clock: srv.Clock,
}
// configure access to the full Auth Server API and the cached subset for
// the local cluster within which reversetunnel.Server is running.
remoteSite.localClient = srv.localAuthClient
remoteSite.localAccessPoint = srv.localAccessPoint
clt, _, err := remoteSite.getRemoteClient()
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.remoteClient = clt
// configure access to the cached subset of the Auth Server API of the remote
// cluster this remote site provides access to.
accessPoint, err := srv.newAccessPoint(clt, []string{"reverse", domainName})
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.remoteAccessPoint = accessPoint
// instantiate a cache of host certificates for the forwarding server. the
// certificate cache is created in each site (instead of creating it in
// reversetunnel.server and passing it along) so that the host certificate
// is signed by the correct certificate authority.
certificateCache, err := NewHostCertificateCache(srv.Config.KeyGen, srv.localAuthClient)
if err != nil {
return nil, trace.Wrap(err)
}
remoteSite.certificateCache = certificateCache
go remoteSite.periodicUpdateCertAuthorities()
return remoteSite, nil
} | [
"func",
"newRemoteSite",
"(",
"srv",
"*",
"server",
",",
"domainName",
"string",
")",
"(",
"*",
"remoteSite",
",",
"error",
")",
"{",
"connInfo",
",",
"err",
":=",
"services",
".",
"NewTunnelConnection",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"srv",
".",
"ID",
",",
"domainName",
")",
",",
"services",
".",
"TunnelConnectionSpecV2",
"{",
"ClusterName",
":",
"domainName",
",",
"ProxyName",
":",
"srv",
".",
"ID",
",",
"LastHeartbeat",
":",
"time",
".",
"Now",
"(",
")",
".",
"UTC",
"(",
")",
",",
"}",
",",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n\n",
"closeContext",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"srv",
".",
"ctx",
")",
"\n",
"remoteSite",
":=",
"&",
"remoteSite",
"{",
"srv",
":",
"srv",
",",
"domainName",
":",
"domainName",
",",
"connInfo",
":",
"connInfo",
",",
"Entry",
":",
"log",
".",
"WithFields",
"(",
"log",
".",
"Fields",
"{",
"trace",
".",
"Component",
":",
"teleport",
".",
"ComponentReverseTunnelServer",
",",
"trace",
".",
"ComponentFields",
":",
"log",
".",
"Fields",
"{",
"\"",
"\"",
":",
"domainName",
",",
"}",
",",
"}",
")",
",",
"ctx",
":",
"closeContext",
",",
"cancel",
":",
"cancel",
",",
"clock",
":",
"srv",
".",
"Clock",
",",
"}",
"\n\n",
"// configure access to the full Auth Server API and the cached subset for",
"// the local cluster within which reversetunnel.Server is running.",
"remoteSite",
".",
"localClient",
"=",
"srv",
".",
"localAuthClient",
"\n",
"remoteSite",
".",
"localAccessPoint",
"=",
"srv",
".",
"localAccessPoint",
"\n\n",
"clt",
",",
"_",
",",
"err",
":=",
"remoteSite",
".",
"getRemoteClient",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"remoteSite",
".",
"remoteClient",
"=",
"clt",
"\n\n",
"// configure access to the cached subset of the Auth Server API of the remote",
"// cluster this remote site provides access to.",
"accessPoint",
",",
"err",
":=",
"srv",
".",
"newAccessPoint",
"(",
"clt",
",",
"[",
"]",
"string",
"{",
"\"",
"\"",
",",
"domainName",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"remoteSite",
".",
"remoteAccessPoint",
"=",
"accessPoint",
"\n\n",
"// instantiate a cache of host certificates for the forwarding server. the",
"// certificate cache is created in each site (instead of creating it in",
"// reversetunnel.server and passing it along) so that the host certificate",
"// is signed by the correct certificate authority.",
"certificateCache",
",",
"err",
":=",
"NewHostCertificateCache",
"(",
"srv",
".",
"Config",
".",
"KeyGen",
",",
"srv",
".",
"localAuthClient",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"trace",
".",
"Wrap",
"(",
"err",
")",
"\n",
"}",
"\n",
"remoteSite",
".",
"certificateCache",
"=",
"certificateCache",
"\n\n",
"go",
"remoteSite",
".",
"periodicUpdateCertAuthorities",
"(",
")",
"\n\n",
"return",
"remoteSite",
",",
"nil",
"\n",
"}"
] | // newRemoteSite helper creates and initializes a 'remoteSite' instance | [
"newRemoteSite",
"helper",
"creates",
"and",
"initializes",
"remoteSite",
"instance"
] | d5243dbe8d36bba44bf640c08f1c49185ed2c8a4 | https://github.com/gravitational/teleport/blob/d5243dbe8d36bba44bf640c08f1c49185ed2c8a4/lib/reversetunnel/srv.go#L895-L956 | train |
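newRemoteSite ties the site's background refresh to a child context derived from srv.ctx and starts it with go periodicUpdateCertAuthorities(). A generic, runnable sketch of that lifecycle pattern; the worker below is a stand-in, not teleport code:

package main

import (
	"context"
	"fmt"
	"time"
)

// worker stands in for a periodic task such as periodicUpdateCertAuthorities:
// it runs until the owning context is cancelled.
func worker(ctx context.Context) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Println("worker stopped:", ctx.Err())
			return
		case <-ticker.C:
			fmt.Println("periodic refresh")
		}
	}
}

func main() {
	parent := context.Background() // plays the role of srv.ctx
	ctx, cancel := context.WithCancel(parent)
	go worker(ctx)

	time.Sleep(120 * time.Millisecond)
	cancel() // closing the site cancels its background work
	time.Sleep(20 * time.Millisecond)
}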
tealeg/xlsx | xmlWorksheet.go | getExtent | func (mc *xlsxMergeCells) getExtent(cellRef string) (int, int, error) {
if mc == nil {
return 0, 0, nil
}
for _, cell := range mc.Cells {
if strings.HasPrefix(cell.Ref, cellRef+cellRangeChar) {
parts := strings.Split(cell.Ref, cellRangeChar)
startx, starty, err := GetCoordsFromCellIDString(parts[0])
if err != nil {
return -1, -1, err
}
endx, endy, err := GetCoordsFromCellIDString(parts[1])
if err != nil {
return -2, -2, err
}
return endx - startx, endy - starty, nil
}
}
return 0, 0, nil
} | go | func (mc *xlsxMergeCells) getExtent(cellRef string) (int, int, error) {
if mc == nil {
return 0, 0, nil
}
for _, cell := range mc.Cells {
if strings.HasPrefix(cell.Ref, cellRef+cellRangeChar) {
parts := strings.Split(cell.Ref, cellRangeChar)
startx, starty, err := GetCoordsFromCellIDString(parts[0])
if err != nil {
return -1, -1, err
}
endx, endy, err := GetCoordsFromCellIDString(parts[1])
if err != nil {
return -2, -2, err
}
return endx - startx, endy - starty, nil
}
}
return 0, 0, nil
} | [
"func",
"(",
"mc",
"*",
"xlsxMergeCells",
")",
"getExtent",
"(",
"cellRef",
"string",
")",
"(",
"int",
",",
"int",
",",
"error",
")",
"{",
"if",
"mc",
"==",
"nil",
"{",
"return",
"0",
",",
"0",
",",
"nil",
"\n",
"}",
"\n",
"for",
"_",
",",
"cell",
":=",
"range",
"mc",
".",
"Cells",
"{",
"if",
"strings",
".",
"HasPrefix",
"(",
"cell",
".",
"Ref",
",",
"cellRef",
"+",
"cellRangeChar",
")",
"{",
"parts",
":=",
"strings",
".",
"Split",
"(",
"cell",
".",
"Ref",
",",
"cellRangeChar",
")",
"\n",
"startx",
",",
"starty",
",",
"err",
":=",
"GetCoordsFromCellIDString",
"(",
"parts",
"[",
"0",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"1",
",",
"-",
"1",
",",
"err",
"\n",
"}",
"\n",
"endx",
",",
"endy",
",",
"err",
":=",
"GetCoordsFromCellIDString",
"(",
"parts",
"[",
"1",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"-",
"2",
",",
"-",
"2",
",",
"err",
"\n",
"}",
"\n",
"return",
"endx",
"-",
"startx",
",",
"endy",
"-",
"starty",
",",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"0",
",",
"0",
",",
"nil",
"\n",
"}"
] | // Return the cartesian extent of a merged cell range from its origin
// cell (the closest merged cell to the top left of the sheet). | [
"Return",
"the",
"cartesian",
"extent",
"of",
"a",
"merged",
"cell",
"range",
"from",
"its",
"origin",
"cell",
"(",
"the",
"closest",
"merged",
"cell",
"to",
"the",
"to",
"left",
"of",
"the",
"sheet",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/xmlWorksheet.go#L312-L331 | train |
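getExtent builds on the exported coordinate helper GetCoordsFromCellIDString. Assuming that helper behaves as used above, computing the extent of a merge reference such as "A1:C3" looks roughly like this:

package main

import (
	"fmt"
	"strings"

	"github.com/tealeg/xlsx"
)

// mergeExtent returns the width/height offsets of a merge range such as "A1:C3",
// mirroring what getExtent computes for a single merged cell.
func mergeExtent(ref string) (int, int, error) {
	parts := strings.Split(ref, ":")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("not a range: %q", ref)
	}
	sx, sy, err := xlsx.GetCoordsFromCellIDString(parts[0])
	if err != nil {
		return 0, 0, err
	}
	ex, ey, err := xlsx.GetCoordsFromCellIDString(parts[1])
	if err != nil {
		return 0, 0, err
	}
	return ex - sx, ey - sy, nil
}

func main() {
	fmt.Println(mergeExtent("A1:C3")) // 2 2 <nil>
}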
tealeg/xlsx | col.go | SetType | func (c *Col) SetType(cellType CellType) {
switch cellType {
case CellTypeString:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
case CellTypeNumeric:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_INT]
case CellTypeBool:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
case CellTypeInline:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
case CellTypeError:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
case CellTypeDate:
// Cells that are stored as dates are not properly supported in this library.
// They should instead be stored as a Numeric with a date format.
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL]
case CellTypeStringFormula:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
}
} | go | func (c *Col) SetType(cellType CellType) {
switch cellType {
case CellTypeString:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
case CellTypeNumeric:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_INT]
case CellTypeBool:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
case CellTypeInline:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
case CellTypeError:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL] //TEMP
case CellTypeDate:
// Cells that are stored as dates are not properly supported in this library.
// They should instead be stored as a Numeric with a date format.
c.numFmt = builtInNumFmt[builtInNumFmtIndex_GENERAL]
case CellTypeStringFormula:
c.numFmt = builtInNumFmt[builtInNumFmtIndex_STRING]
}
} | [
"func",
"(",
"c",
"*",
"Col",
")",
"SetType",
"(",
"cellType",
"CellType",
")",
"{",
"switch",
"cellType",
"{",
"case",
"CellTypeString",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_STRING",
"]",
"\n",
"case",
"CellTypeNumeric",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_INT",
"]",
"\n",
"case",
"CellTypeBool",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_GENERAL",
"]",
"//TEMP",
"\n",
"case",
"CellTypeInline",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_STRING",
"]",
"\n",
"case",
"CellTypeError",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_GENERAL",
"]",
"//TEMP",
"\n",
"case",
"CellTypeDate",
":",
"// Cells that are stored as dates are not properly supported in this library.",
"// They should instead be stored as a Numeric with a date format.",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_GENERAL",
"]",
"\n",
"case",
"CellTypeStringFormula",
":",
"c",
".",
"numFmt",
"=",
"builtInNumFmt",
"[",
"builtInNumFmtIndex_STRING",
"]",
"\n",
"}",
"\n",
"}"
] | // SetType will set the format string of a column based on the type that you want to set it to.
// This function does not really make a lot of sense. | [
"SetType",
"will",
"set",
"the",
"format",
"string",
"of",
"a",
"column",
"based",
"on",
"the",
"type",
"that",
"you",
"want",
"to",
"set",
"it",
"to",
".",
"This",
"function",
"does",
"not",
"really",
"make",
"a",
"lot",
"of",
"sense",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/col.go#L23-L42 | train |
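A short usage sketch for Col.SetType, assuming the usual v1 tealeg/xlsx helpers (NewFile, AddSheet, AddRow, AddCell, SetInt, Save) alongside the Col API shown here; file and sheet names are arbitrary:

package main

import (
	"fmt"

	"github.com/tealeg/xlsx"
)

func main() {
	file := xlsx.NewFile()
	sheet, err := file.AddSheet("report")
	if err != nil {
		panic(err)
	}
	row := sheet.AddRow()
	row.AddCell().SetInt(42)

	// Ask the first column to use the built-in integer format, as described above.
	sheet.Col(0).SetType(xlsx.CellTypeNumeric)

	if err := file.Save("report.xlsx"); err != nil {
		fmt.Println("save failed:", err)
	}
}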
tealeg/xlsx | col.go | SetDataValidation | func (c *Col) SetDataValidation(dd *xlsxCellDataValidation, start, end int) {
if end < 0 {
end = Excel2006MaxRowIndex
}
dd.minRow = start
dd.maxRow = end
tmpDD := make([]*xlsxCellDataValidation, 0)
for _, item := range c.DataValidation {
if item.maxRow < dd.minRow {
tmpDD = append(tmpDD, item) //No intersection
} else if item.minRow > dd.maxRow {
tmpDD = append(tmpDD, item) //No intersection
} else if dd.minRow <= item.minRow && dd.maxRow >= item.maxRow {
continue //union , item can be ignored
} else if dd.minRow >= item.minRow {
// Split into two or three ranges: the new validation overlaps the lower part of the existing one
tmpSplit := new(xlsxCellDataValidation)
*tmpSplit = *item
if dd.minRow > item.minRow { // whether the header part needs to be split
item.maxRow = dd.minRow - 1
tmpDD = append(tmpDD, item)
}
if dd.maxRow < tmpSplit.maxRow { // whether the footer part needs to be split
tmpSplit.minRow = dd.maxRow + 1
tmpDD = append(tmpDD, tmpSplit)
}
} else {
item.minRow = dd.maxRow + 1
tmpDD = append(tmpDD, item)
}
}
tmpDD = append(tmpDD, dd)
c.DataValidation = tmpDD
} | go | func (c *Col) SetDataValidation(dd *xlsxCellDataValidation, start, end int) {
if end < 0 {
end = Excel2006MaxRowIndex
}
dd.minRow = start
dd.maxRow = end
tmpDD := make([]*xlsxCellDataValidation, 0)
for _, item := range c.DataValidation {
if item.maxRow < dd.minRow {
tmpDD = append(tmpDD, item) //No intersection
} else if item.minRow > dd.maxRow {
tmpDD = append(tmpDD, item) //No intersection
} else if dd.minRow <= item.minRow && dd.maxRow >= item.maxRow {
continue //union , item can be ignored
} else if dd.minRow >= item.minRow {
// Split into two or three ranges: the new validation overlaps the lower part of the existing one
tmpSplit := new(xlsxCellDataValidation)
*tmpSplit = *item
if dd.minRow > item.minRow { // whether the header part needs to be split
item.maxRow = dd.minRow - 1
tmpDD = append(tmpDD, item)
}
if dd.maxRow < tmpSplit.maxRow { // whether the footer part needs to be split
tmpSplit.minRow = dd.maxRow + 1
tmpDD = append(tmpDD, tmpSplit)
}
} else {
item.minRow = dd.maxRow + 1
tmpDD = append(tmpDD, item)
}
}
tmpDD = append(tmpDD, dd)
c.DataValidation = tmpDD
} | [
"func",
"(",
"c",
"*",
"Col",
")",
"SetDataValidation",
"(",
"dd",
"*",
"xlsxCellDataValidation",
",",
"start",
",",
"end",
"int",
")",
"{",
"if",
"end",
"<",
"0",
"{",
"end",
"=",
"Excel2006MaxRowIndex",
"\n",
"}",
"\n\n",
"dd",
".",
"minRow",
"=",
"start",
"\n",
"dd",
".",
"maxRow",
"=",
"end",
"\n\n",
"tmpDD",
":=",
"make",
"(",
"[",
"]",
"*",
"xlsxCellDataValidation",
",",
"0",
")",
"\n",
"for",
"_",
",",
"item",
":=",
"range",
"c",
".",
"DataValidation",
"{",
"if",
"item",
".",
"maxRow",
"<",
"dd",
".",
"minRow",
"{",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"item",
")",
"//No intersection",
"\n",
"}",
"else",
"if",
"item",
".",
"minRow",
">",
"dd",
".",
"maxRow",
"{",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"item",
")",
"//No intersection",
"\n",
"}",
"else",
"if",
"dd",
".",
"minRow",
"<=",
"item",
".",
"minRow",
"&&",
"dd",
".",
"maxRow",
">=",
"item",
".",
"maxRow",
"{",
"continue",
"//union , item can be ignored",
"\n",
"}",
"else",
"if",
"dd",
".",
"minRow",
">=",
"item",
".",
"minRow",
"{",
"//Split into three or two, Newly added object, intersect with the current object in the lower half",
"tmpSplit",
":=",
"new",
"(",
"xlsxCellDataValidation",
")",
"\n",
"*",
"tmpSplit",
"=",
"*",
"item",
"\n\n",
"if",
"dd",
".",
"minRow",
">",
"item",
".",
"minRow",
"{",
"//header whetherneed to split",
"item",
".",
"maxRow",
"=",
"dd",
".",
"minRow",
"-",
"1",
"\n",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"item",
")",
"\n",
"}",
"\n",
"if",
"dd",
".",
"maxRow",
"<",
"tmpSplit",
".",
"maxRow",
"{",
"//footer whetherneed to split",
"tmpSplit",
".",
"minRow",
"=",
"dd",
".",
"maxRow",
"+",
"1",
"\n",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"tmpSplit",
")",
"\n",
"}",
"\n\n",
"}",
"else",
"{",
"item",
".",
"minRow",
"=",
"dd",
".",
"maxRow",
"+",
"1",
"\n",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"item",
")",
"\n",
"}",
"\n",
"}",
"\n",
"tmpDD",
"=",
"append",
"(",
"tmpDD",
",",
"dd",
")",
"\n",
"c",
".",
"DataValidation",
"=",
"tmpDD",
"\n",
"}"
] | // SetDataValidation sets data validation with zero-based start and end.
// Set end to -1 for all rows. | [
"SetDataValidation",
"set",
"data",
"validation",
"with",
"zero",
"based",
"start",
"and",
"end",
".",
"Set",
"end",
"to",
"-",
"1",
"for",
"all",
"rows",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/col.go#L56-L93 | train |
tealeg/xlsx | col.go | SetDataValidationWithStart | func (c *Col) SetDataValidationWithStart(dd *xlsxCellDataValidation, start int) {
c.SetDataValidation(dd, start, -1)
} | go | func (c *Col) SetDataValidationWithStart(dd *xlsxCellDataValidation, start int) {
c.SetDataValidation(dd, start, -1)
} | [
"func",
"(",
"c",
"*",
"Col",
")",
"SetDataValidationWithStart",
"(",
"dd",
"*",
"xlsxCellDataValidation",
",",
"start",
"int",
")",
"{",
"c",
".",
"SetDataValidation",
"(",
"dd",
",",
"start",
",",
"-",
"1",
")",
"\n",
"}"
] | // SetDataValidationWithStart sets data validation with a zero-based start row.
// This will apply to the rest of the column. | [
"SetDataValidationWithStart",
"set",
"data",
"validation",
"with",
"a",
"zero",
"basd",
"start",
"row",
".",
"This",
"will",
"apply",
"to",
"the",
"rest",
"of",
"the",
"rest",
"of",
"the",
"column",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/col.go#L97-L99 | train |
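A typical caller builds a rule and attaches it with SetDataValidationWithStart. The sketch below additionally assumes the NewXlsxCellDataValidation and SetDropList helpers from the same era of tealeg/xlsx; names are arbitrary:

package main

import (
	"github.com/tealeg/xlsx"
)

func main() {
	file := xlsx.NewFile()
	sheet, err := file.AddSheet("orders")
	if err != nil {
		panic(err)
	}
	sheet.AddRow().AddCell().SetString("status")

	// Restrict column A, from row 2 (zero-based index 1) downwards, to a drop list.
	dv := xlsx.NewXlsxCellDataValidation(true)
	if err := dv.SetDropList([]string{"new", "paid", "shipped"}); err != nil {
		panic(err)
	}
	sheet.Col(0).SetDataValidationWithStart(dv, 1)

	if err := file.Save("orders.xlsx"); err != nil {
		panic(err)
	}
}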
tealeg/xlsx | stream_file_builder.go | NewStreamFileBuilder | func NewStreamFileBuilder(writer io.Writer) *StreamFileBuilder {
return &StreamFileBuilder{
zipWriter: zip.NewWriter(writer),
xlsxFile: NewFile(),
cellTypeToStyleIds: make(map[CellType]int),
maxStyleId: initMaxStyleId,
}
} | go | func NewStreamFileBuilder(writer io.Writer) *StreamFileBuilder {
return &StreamFileBuilder{
zipWriter: zip.NewWriter(writer),
xlsxFile: NewFile(),
cellTypeToStyleIds: make(map[CellType]int),
maxStyleId: initMaxStyleId,
}
} | [
"func",
"NewStreamFileBuilder",
"(",
"writer",
"io",
".",
"Writer",
")",
"*",
"StreamFileBuilder",
"{",
"return",
"&",
"StreamFileBuilder",
"{",
"zipWriter",
":",
"zip",
".",
"NewWriter",
"(",
"writer",
")",
",",
"xlsxFile",
":",
"NewFile",
"(",
")",
",",
"cellTypeToStyleIds",
":",
"make",
"(",
"map",
"[",
"CellType",
"]",
"int",
")",
",",
"maxStyleId",
":",
"initMaxStyleId",
",",
"}",
"\n",
"}"
] | // NewStreamFileBuilder creates a StreamFileBuilder that will write to the provided io.Writer | [
"NewStreamFileBuilder",
"creates",
"an",
"StreamFileBuilder",
"that",
"will",
"write",
"to",
"the",
"the",
"provided",
"io",
".",
"writer"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L58-L65 | train |
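NewStreamFileBuilder is the entry point for the streaming writer whose AddSheet, Build, Write and Close methods appear in the surrounding entries. A minimal end-to-end usage sketch; the output file name is arbitrary:

package main

import (
	"os"

	"github.com/tealeg/xlsx"
)

func main() {
	out, err := os.Create("stream.xlsx")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	builder := xlsx.NewStreamFileBuilder(out)

	// Headers are fixed at build time; every later Write must match their length.
	str := xlsx.CellTypeString
	if err := builder.AddSheet("users", []string{"name", "email"}, []*xlsx.CellType{&str, &str}); err != nil {
		panic(err)
	}

	stream, err := builder.Build()
	if err != nil {
		panic(err)
	}
	if err := stream.Write([]string{"alice", "alice@example.com"}); err != nil {
		panic(err)
	}
	if err := stream.Close(); err != nil {
		panic(err)
	}
}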
tealeg/xlsx | stream_file_builder.go | NewStreamFileBuilderForPath | func NewStreamFileBuilderForPath(path string) (*StreamFileBuilder, error) {
file, err := os.Create(path)
if err != nil {
return nil, err
}
return NewStreamFileBuilder(file), nil
} | go | func NewStreamFileBuilderForPath(path string) (*StreamFileBuilder, error) {
file, err := os.Create(path)
if err != nil {
return nil, err
}
return NewStreamFileBuilder(file), nil
} | [
"func",
"NewStreamFileBuilderForPath",
"(",
"path",
"string",
")",
"(",
"*",
"StreamFileBuilder",
",",
"error",
")",
"{",
"file",
",",
"err",
":=",
"os",
".",
"Create",
"(",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"NewStreamFileBuilder",
"(",
"file",
")",
",",
"nil",
"\n",
"}"
] | // NewStreamFileBuilderForPath takes the name of an XLSX file and returns a builder for it.
// The file will be created if it does not exist, or truncated if it does. | [
"NewStreamFileBuilderForPath",
"takes",
"the",
"name",
"of",
"an",
"XLSX",
"file",
"and",
"returns",
"a",
"builder",
"for",
"it",
".",
"The",
"file",
"will",
"be",
"created",
"if",
"it",
"does",
"not",
"exist",
"or",
"truncated",
"if",
"it",
"does",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L69-L75 | train |
tealeg/xlsx | stream_file_builder.go | AddSheet | func (sb *StreamFileBuilder) AddSheet(name string, headers []string, cellTypes []*CellType) error {
if sb.built {
return BuiltStreamFileBuilderError
}
if len(cellTypes) > len(headers) {
return errors.New("cellTypes is longer than headers")
}
sheet, err := sb.xlsxFile.AddSheet(name)
if err != nil {
// Set built on error so that all subsequent calls to the builder will also fail.
sb.built = true
return err
}
sb.styleIds = append(sb.styleIds, []int{})
row := sheet.AddRow()
if count := row.WriteSlice(&headers, -1); count != len(headers) {
// Set built on error so that all subsequent calls to the builder will also fail.
sb.built = true
return errors.New("failed to write headers")
}
for i, cellType := range cellTypes {
var cellStyleIndex int
var ok bool
if cellType != nil {
// The cell type is one of the attributes of a Style.
// Since it is the only attribute of Style that we use, we can assume that cell types
// map one to one with Styles and their Style ID.
// If a new cell type is used, a new style gets created with an increased id, if an existing cell type is
// used, the pre-existing style will also be used.
cellStyleIndex, ok = sb.cellTypeToStyleIds[*cellType]
if !ok {
sb.maxStyleId++
cellStyleIndex = sb.maxStyleId
sb.cellTypeToStyleIds[*cellType] = sb.maxStyleId
}
sheet.Cols[i].SetType(*cellType)
}
sb.styleIds[len(sb.styleIds)-1] = append(sb.styleIds[len(sb.styleIds)-1], cellStyleIndex)
}
return nil
} | go | func (sb *StreamFileBuilder) AddSheet(name string, headers []string, cellTypes []*CellType) error {
if sb.built {
return BuiltStreamFileBuilderError
}
if len(cellTypes) > len(headers) {
return errors.New("cellTypes is longer than headers")
}
sheet, err := sb.xlsxFile.AddSheet(name)
if err != nil {
// Set built on error so that all subsequent calls to the builder will also fail.
sb.built = true
return err
}
sb.styleIds = append(sb.styleIds, []int{})
row := sheet.AddRow()
if count := row.WriteSlice(&headers, -1); count != len(headers) {
// Set built on error so that all subsequent calls to the builder will also fail.
sb.built = true
return errors.New("failed to write headers")
}
for i, cellType := range cellTypes {
var cellStyleIndex int
var ok bool
if cellType != nil {
// The cell type is one of the attributes of a Style.
// Since it is the only attribute of Style that we use, we can assume that cell types
// map one to one with Styles and their Style ID.
// If a new cell type is used, a new style gets created with an increased id, if an existing cell type is
// used, the pre-existing style will also be used.
cellStyleIndex, ok = sb.cellTypeToStyleIds[*cellType]
if !ok {
sb.maxStyleId++
cellStyleIndex = sb.maxStyleId
sb.cellTypeToStyleIds[*cellType] = sb.maxStyleId
}
sheet.Cols[i].SetType(*cellType)
}
sb.styleIds[len(sb.styleIds)-1] = append(sb.styleIds[len(sb.styleIds)-1], cellStyleIndex)
}
return nil
} | [
"func",
"(",
"sb",
"*",
"StreamFileBuilder",
")",
"AddSheet",
"(",
"name",
"string",
",",
"headers",
"[",
"]",
"string",
",",
"cellTypes",
"[",
"]",
"*",
"CellType",
")",
"error",
"{",
"if",
"sb",
".",
"built",
"{",
"return",
"BuiltStreamFileBuilderError",
"\n",
"}",
"\n",
"if",
"len",
"(",
"cellTypes",
")",
">",
"len",
"(",
"headers",
")",
"{",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"sheet",
",",
"err",
":=",
"sb",
".",
"xlsxFile",
".",
"AddSheet",
"(",
"name",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"// Set built on error so that all subsequent calls to the builder will also fail.",
"sb",
".",
"built",
"=",
"true",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"sb",
".",
"styleIds",
"=",
"append",
"(",
"sb",
".",
"styleIds",
",",
"[",
"]",
"int",
"{",
"}",
")",
"\n",
"row",
":=",
"sheet",
".",
"AddRow",
"(",
")",
"\n",
"if",
"count",
":=",
"row",
".",
"WriteSlice",
"(",
"&",
"headers",
",",
"-",
"1",
")",
";",
"count",
"!=",
"len",
"(",
"headers",
")",
"{",
"// Set built on error so that all subsequent calls to the builder will also fail.",
"sb",
".",
"built",
"=",
"true",
"\n",
"return",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"for",
"i",
",",
"cellType",
":=",
"range",
"cellTypes",
"{",
"var",
"cellStyleIndex",
"int",
"\n",
"var",
"ok",
"bool",
"\n",
"if",
"cellType",
"!=",
"nil",
"{",
"// The cell type is one of the attributes of a Style.",
"// Since it is the only attribute of Style that we use, we can assume that cell types",
"// map one to one with Styles and their Style ID.",
"// If a new cell type is used, a new style gets created with an increased id, if an existing cell type is",
"// used, the pre-existing style will also be used.",
"cellStyleIndex",
",",
"ok",
"=",
"sb",
".",
"cellTypeToStyleIds",
"[",
"*",
"cellType",
"]",
"\n",
"if",
"!",
"ok",
"{",
"sb",
".",
"maxStyleId",
"++",
"\n",
"cellStyleIndex",
"=",
"sb",
".",
"maxStyleId",
"\n",
"sb",
".",
"cellTypeToStyleIds",
"[",
"*",
"cellType",
"]",
"=",
"sb",
".",
"maxStyleId",
"\n",
"}",
"\n",
"sheet",
".",
"Cols",
"[",
"i",
"]",
".",
"SetType",
"(",
"*",
"cellType",
")",
"\n",
"}",
"\n",
"sb",
".",
"styleIds",
"[",
"len",
"(",
"sb",
".",
"styleIds",
")",
"-",
"1",
"]",
"=",
"append",
"(",
"sb",
".",
"styleIds",
"[",
"len",
"(",
"sb",
".",
"styleIds",
")",
"-",
"1",
"]",
",",
"cellStyleIndex",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // AddSheet will add a sheet with the given name and the provided headers. The headers cannot be edited later, and all
// rows written to the sheet must contain the same number of cells as the header. Sheet names must be unique, or an
// error will be returned. | [
"AddSheet",
"will",
"add",
"sheets",
"with",
"the",
"given",
"name",
"with",
"the",
"provided",
"headers",
".",
"The",
"headers",
"cannot",
"be",
"edited",
"later",
"and",
"all",
"rows",
"written",
"to",
"the",
"sheet",
"must",
"contain",
"the",
"same",
"number",
"of",
"cells",
"as",
"the",
"header",
".",
"Sheet",
"names",
"must",
"be",
"unique",
"or",
"an",
"error",
"will",
"be",
"thrown",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L80-L120 | train |
tealeg/xlsx | stream_file_builder.go | AddValidation | func (sb *StreamFileBuilder) AddValidation(sheetIndex, colIndex, rowStartIndex int, validation *xlsxCellDataValidation) {
sheet := sb.xlsxFile.Sheets[sheetIndex]
column := sheet.Col(colIndex)
column.SetDataValidationWithStart(validation, rowStartIndex)
} | go | func (sb *StreamFileBuilder) AddValidation(sheetIndex, colIndex, rowStartIndex int, validation *xlsxCellDataValidation) {
sheet := sb.xlsxFile.Sheets[sheetIndex]
column := sheet.Col(colIndex)
column.SetDataValidationWithStart(validation, rowStartIndex)
} | [
"func",
"(",
"sb",
"*",
"StreamFileBuilder",
")",
"AddValidation",
"(",
"sheetIndex",
",",
"colIndex",
",",
"rowStartIndex",
"int",
",",
"validation",
"*",
"xlsxCellDataValidation",
")",
"{",
"sheet",
":=",
"sb",
".",
"xlsxFile",
".",
"Sheets",
"[",
"sheetIndex",
"]",
"\n",
"column",
":=",
"sheet",
".",
"Col",
"(",
"colIndex",
")",
"\n",
"column",
".",
"SetDataValidationWithStart",
"(",
"validation",
",",
"rowStartIndex",
")",
"\n",
"}"
] | // AddValidation will add a validation to a specific column. | [
"AddValidation",
"will",
"add",
"a",
"validation",
"to",
"a",
"specific",
"column",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L123-L127 | train |
tealeg/xlsx | stream_file_builder.go | Build | func (sb *StreamFileBuilder) Build() (*StreamFile, error) {
if sb.built {
return nil, BuiltStreamFileBuilderError
}
sb.built = true
parts, err := sb.xlsxFile.MarshallParts()
if err != nil {
return nil, err
}
es := &StreamFile{
zipWriter: sb.zipWriter,
xlsxFile: sb.xlsxFile,
sheetXmlPrefix: make([]string, len(sb.xlsxFile.Sheets)),
sheetXmlSuffix: make([]string, len(sb.xlsxFile.Sheets)),
styleIds: sb.styleIds,
}
for path, data := range parts {
// If the part is a sheet, don't write it yet. We only want to write the XLSX metadata files, since at this
// point the sheets are still empty. The sheet files will be written later as their rows come in.
if strings.HasPrefix(path, sheetFilePathPrefix) {
if err := sb.processEmptySheetXML(es, path, data); err != nil {
return nil, err
}
continue
}
metadataFile, err := sb.zipWriter.Create(path)
if err != nil {
return nil, err
}
_, err = metadataFile.Write([]byte(data))
if err != nil {
return nil, err
}
}
if err := es.NextSheet(); err != nil {
return nil, err
}
return es, nil
} | go | func (sb *StreamFileBuilder) Build() (*StreamFile, error) {
if sb.built {
return nil, BuiltStreamFileBuilderError
}
sb.built = true
parts, err := sb.xlsxFile.MarshallParts()
if err != nil {
return nil, err
}
es := &StreamFile{
zipWriter: sb.zipWriter,
xlsxFile: sb.xlsxFile,
sheetXmlPrefix: make([]string, len(sb.xlsxFile.Sheets)),
sheetXmlSuffix: make([]string, len(sb.xlsxFile.Sheets)),
styleIds: sb.styleIds,
}
for path, data := range parts {
// If the part is a sheet, don't write it yet. We only want to write the XLSX metadata files, since at this
// point the sheets are still empty. The sheet files will be written later as their rows come in.
if strings.HasPrefix(path, sheetFilePathPrefix) {
if err := sb.processEmptySheetXML(es, path, data); err != nil {
return nil, err
}
continue
}
metadataFile, err := sb.zipWriter.Create(path)
if err != nil {
return nil, err
}
_, err = metadataFile.Write([]byte(data))
if err != nil {
return nil, err
}
}
if err := es.NextSheet(); err != nil {
return nil, err
}
return es, nil
} | [
"func",
"(",
"sb",
"*",
"StreamFileBuilder",
")",
"Build",
"(",
")",
"(",
"*",
"StreamFile",
",",
"error",
")",
"{",
"if",
"sb",
".",
"built",
"{",
"return",
"nil",
",",
"BuiltStreamFileBuilderError",
"\n",
"}",
"\n",
"sb",
".",
"built",
"=",
"true",
"\n",
"parts",
",",
"err",
":=",
"sb",
".",
"xlsxFile",
".",
"MarshallParts",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"es",
":=",
"&",
"StreamFile",
"{",
"zipWriter",
":",
"sb",
".",
"zipWriter",
",",
"xlsxFile",
":",
"sb",
".",
"xlsxFile",
",",
"sheetXmlPrefix",
":",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"sb",
".",
"xlsxFile",
".",
"Sheets",
")",
")",
",",
"sheetXmlSuffix",
":",
"make",
"(",
"[",
"]",
"string",
",",
"len",
"(",
"sb",
".",
"xlsxFile",
".",
"Sheets",
")",
")",
",",
"styleIds",
":",
"sb",
".",
"styleIds",
",",
"}",
"\n",
"for",
"path",
",",
"data",
":=",
"range",
"parts",
"{",
"// If the part is a sheet, don't write it yet. We only want to write the XLSX metadata files, since at this",
"// point the sheets are still empty. The sheet files will be written later as their rows come in.",
"if",
"strings",
".",
"HasPrefix",
"(",
"path",
",",
"sheetFilePathPrefix",
")",
"{",
"if",
"err",
":=",
"sb",
".",
"processEmptySheetXML",
"(",
"es",
",",
"path",
",",
"data",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"continue",
"\n",
"}",
"\n",
"metadataFile",
",",
"err",
":=",
"sb",
".",
"zipWriter",
".",
"Create",
"(",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"_",
",",
"err",
"=",
"metadataFile",
".",
"Write",
"(",
"[",
"]",
"byte",
"(",
"data",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"es",
".",
"NextSheet",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"return",
"es",
",",
"nil",
"\n",
"}"
] | // Build begins streaming the XLSX file to the underlying io.Writer by writing all the XLSX metadata. It creates a StreamFile struct
// that can be used to write the rows to the sheets. | [
"Build",
"begins",
"streaming",
"the",
"XLSX",
"file",
"to",
"the",
"io",
"by",
"writing",
"all",
"the",
"XLSX",
"metadata",
".",
"It",
"creates",
"a",
"StreamFile",
"struct",
"that",
"can",
"be",
"used",
"to",
"write",
"the",
"rows",
"to",
"the",
"sheets",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L131-L170 | train |
tealeg/xlsx | stream_file_builder.go | processEmptySheetXML | func (sb *StreamFileBuilder) processEmptySheetXML(sf *StreamFile, path, data string) error {
// Get the sheet index from the path
sheetIndex, err := getSheetIndex(sf, path)
if err != nil {
return err
}
// Remove the Dimension tag. Since more rows are going to be written to the sheet, it will be wrong.
// It is valid for a sheet to be missing a Dimension tag, but it is not valid for it to be wrong.
data, err = removeDimensionTag(data, sf.xlsxFile.Sheets[sheetIndex])
if err != nil {
return err
}
// Split the sheet at the end of its SheetData tag so that more rows can be added inside.
prefix, suffix, err := splitSheetIntoPrefixAndSuffix(data)
if err != nil {
return err
}
sf.sheetXmlPrefix[sheetIndex] = prefix
sf.sheetXmlSuffix[sheetIndex] = suffix
return nil
} | go | func (sb *StreamFileBuilder) processEmptySheetXML(sf *StreamFile, path, data string) error {
// Get the sheet index from the path
sheetIndex, err := getSheetIndex(sf, path)
if err != nil {
return err
}
// Remove the Dimension tag. Since more rows are going to be written to the sheet, it will be wrong.
// It is valid for a sheet to be missing a Dimension tag, but it is not valid for it to be wrong.
data, err = removeDimensionTag(data, sf.xlsxFile.Sheets[sheetIndex])
if err != nil {
return err
}
// Split the sheet at the end of its SheetData tag so that more rows can be added inside.
prefix, suffix, err := splitSheetIntoPrefixAndSuffix(data)
if err != nil {
return err
}
sf.sheetXmlPrefix[sheetIndex] = prefix
sf.sheetXmlSuffix[sheetIndex] = suffix
return nil
} | [
"func",
"(",
"sb",
"*",
"StreamFileBuilder",
")",
"processEmptySheetXML",
"(",
"sf",
"*",
"StreamFile",
",",
"path",
",",
"data",
"string",
")",
"error",
"{",
"// Get the sheet index from the path",
"sheetIndex",
",",
"err",
":=",
"getSheetIndex",
"(",
"sf",
",",
"path",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Remove the Dimension tag. Since more rows are going to be written to the sheet, it will be wrong.",
"// It is valid to for a sheet to be missing a Dimension tag, but it is not valid for it to be wrong.",
"data",
",",
"err",
"=",
"removeDimensionTag",
"(",
"data",
",",
"sf",
".",
"xlsxFile",
".",
"Sheets",
"[",
"sheetIndex",
"]",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n\n",
"// Split the sheet at the end of its SheetData tag so that more rows can be added inside.",
"prefix",
",",
"suffix",
",",
"err",
":=",
"splitSheetIntoPrefixAndSuffix",
"(",
"data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"sf",
".",
"sheetXmlPrefix",
"[",
"sheetIndex",
"]",
"=",
"prefix",
"\n",
"sf",
".",
"sheetXmlSuffix",
"[",
"sheetIndex",
"]",
"=",
"suffix",
"\n",
"return",
"nil",
"\n",
"}"
] | // processEmptySheetXML will take in the path and XML data of an empty sheet, and will save the beginning and end of the
// XML file so that these can be written at the right time. | [
"processEmptySheetXML",
"will",
"take",
"in",
"the",
"path",
"and",
"XML",
"data",
"of",
"an",
"empty",
"sheet",
"and",
"will",
"save",
"the",
"beginning",
"and",
"end",
"of",
"the",
"XML",
"file",
"so",
"that",
"these",
"can",
"be",
"written",
"at",
"the",
"right",
"time",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L174-L196 | train |
tealeg/xlsx | stream_file_builder.go | removeDimensionTag | func removeDimensionTag(data string, sheet *Sheet) (string, error) {
x := len(sheet.Cols) - 1
y := len(sheet.Rows) - 1
if x < 0 {
x = 0
}
if y < 0 {
y = 0
}
var dimensionRef string
if x == 0 && y == 0 {
dimensionRef = "A1"
} else {
endCoordinate := GetCellIDStringFromCoords(x, y)
dimensionRef = "A1:" + endCoordinate
}
dataParts := strings.Split(data, fmt.Sprintf(dimensionTag, dimensionRef))
if len(dataParts) != 2 {
return "", errors.New("unexpected Sheet XML: dimension tag not found")
}
return dataParts[0] + dataParts[1], nil
} | go | func removeDimensionTag(data string, sheet *Sheet) (string, error) {
x := len(sheet.Cols) - 1
y := len(sheet.Rows) - 1
if x < 0 {
x = 0
}
if y < 0 {
y = 0
}
var dimensionRef string
if x == 0 && y == 0 {
dimensionRef = "A1"
} else {
endCoordinate := GetCellIDStringFromCoords(x, y)
dimensionRef = "A1:" + endCoordinate
}
dataParts := strings.Split(data, fmt.Sprintf(dimensionTag, dimensionRef))
if len(dataParts) != 2 {
return "", errors.New("unexpected Sheet XML: dimension tag not found")
}
return dataParts[0] + dataParts[1], nil
} | [
"func",
"removeDimensionTag",
"(",
"data",
"string",
",",
"sheet",
"*",
"Sheet",
")",
"(",
"string",
",",
"error",
")",
"{",
"x",
":=",
"len",
"(",
"sheet",
".",
"Cols",
")",
"-",
"1",
"\n",
"y",
":=",
"len",
"(",
"sheet",
".",
"Rows",
")",
"-",
"1",
"\n",
"if",
"x",
"<",
"0",
"{",
"x",
"=",
"0",
"\n",
"}",
"\n",
"if",
"y",
"<",
"0",
"{",
"y",
"=",
"0",
"\n",
"}",
"\n",
"var",
"dimensionRef",
"string",
"\n",
"if",
"x",
"==",
"0",
"&&",
"y",
"==",
"0",
"{",
"dimensionRef",
"=",
"\"",
"\"",
"\n",
"}",
"else",
"{",
"endCoordinate",
":=",
"GetCellIDStringFromCoords",
"(",
"x",
",",
"y",
")",
"\n",
"dimensionRef",
"=",
"\"",
"\"",
"+",
"endCoordinate",
"\n",
"}",
"\n",
"dataParts",
":=",
"strings",
".",
"Split",
"(",
"data",
",",
"fmt",
".",
"Sprintf",
"(",
"dimensionTag",
",",
"dimensionRef",
")",
")",
"\n",
"if",
"len",
"(",
"dataParts",
")",
"!=",
"2",
"{",
"return",
"\"",
"\"",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"dataParts",
"[",
"0",
"]",
"+",
"dataParts",
"[",
"1",
"]",
",",
"nil",
"\n",
"}"
] | // removeDimensionTag will return the passed in XLSX Spreadsheet XML with the dimension tag removed.
// data is the XML data for the sheet
// sheet is the Sheet struct that the XML was created from.
// Can return an error if the XML's dimension tag does not match what is expected based on the provided Sheet | [
"removeDimensionTag",
"will",
"return",
"the",
"passed",
"in",
"XLSX",
"Spreadsheet",
"XML",
"with",
"the",
"dimension",
"tag",
"removed",
".",
"data",
"is",
"the",
"XML",
"data",
"for",
"the",
"sheet",
"sheet",
"is",
"the",
"Sheet",
"struct",
"that",
"the",
"XML",
"was",
"created",
"from",
".",
"Can",
"return",
"an",
"error",
"if",
"the",
"XML",
"s",
"dimension",
"tag",
"does",
"not",
"match",
"was",
"is",
"expected",
"based",
"on",
"the",
"provided",
"Sheet"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L220-L241 | train |
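removeDimensionTag derives the dimension reference from the sheet's maximum column and row indexes via GetCellIDStringFromCoords. A small sketch of that computation, assuming the exported helper behaves as used above:

package main

import (
	"fmt"

	"github.com/tealeg/xlsx"
)

// dimensionRef rebuilds the "A1:..." reference the way removeDimensionTag does
// before searching for the tag to strip.
func dimensionRef(maxColIdx, maxRowIdx int) string {
	if maxColIdx <= 0 && maxRowIdx <= 0 {
		return "A1"
	}
	return "A1:" + xlsx.GetCellIDStringFromCoords(maxColIdx, maxRowIdx)
}

func main() {
	fmt.Println(dimensionRef(0, 0)) // A1
	fmt.Println(dimensionRef(2, 9)) // A1:C10
}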
tealeg/xlsx | stream_file_builder.go | splitSheetIntoPrefixAndSuffix | func splitSheetIntoPrefixAndSuffix(data string) (string, string, error) {
// Split the sheet at the end of its SheetData tag so that more rows can be added inside.
sheetParts := strings.Split(data, endSheetDataTag)
if len(sheetParts) != 2 {
return "", "", errors.New("unexpected Sheet XML: SheetData close tag not found")
}
return sheetParts[0], sheetParts[1], nil
} | go | func splitSheetIntoPrefixAndSuffix(data string) (string, string, error) {
// Split the sheet at the end of its SheetData tag so that more rows can be added inside.
sheetParts := strings.Split(data, endSheetDataTag)
if len(sheetParts) != 2 {
return "", "", errors.New("unexpected Sheet XML: SheetData close tag not found")
}
return sheetParts[0], sheetParts[1], nil
} | [
"func",
"splitSheetIntoPrefixAndSuffix",
"(",
"data",
"string",
")",
"(",
"string",
",",
"string",
",",
"error",
")",
"{",
"// Split the sheet at the end of its SheetData tag so that more rows can be added inside.",
"sheetParts",
":=",
"strings",
".",
"Split",
"(",
"data",
",",
"endSheetDataTag",
")",
"\n",
"if",
"len",
"(",
"sheetParts",
")",
"!=",
"2",
"{",
"return",
"\"",
"\"",
",",
"\"",
"\"",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"return",
"sheetParts",
"[",
"0",
"]",
",",
"sheetParts",
"[",
"1",
"]",
",",
"nil",
"\n",
"}"
] | // splitSheetIntoPrefixAndSuffix will split the provided XML sheet into a prefix and a suffix so that
// more spreadsheet rows can be inserted in between. | [
"splitSheetIntoPrefixAndSuffix",
"will",
"split",
"the",
"provided",
"XML",
"sheet",
"into",
"a",
"prefix",
"and",
"a",
"suffix",
"so",
"that",
"more",
"spreadsheet",
"rows",
"can",
"be",
"inserted",
"in",
"between",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file_builder.go#L245-L252 | train |
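splitSheetIntoPrefixAndSuffix is a plain strings.Split around the closing SheetData tag. A generic, runnable sketch of the same idea:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitAt splits a document around a closing tag so extra content can be
// streamed in between, the same idea as splitSheetIntoPrefixAndSuffix.
func splitAt(doc, closeTag string) (prefix, suffix string, err error) {
	parts := strings.Split(doc, closeTag)
	if len(parts) != 2 {
		return "", "", errors.New("close tag not found exactly once")
	}
	return parts[0], parts[1], nil
}

func main() {
	pre, suf, err := splitAt("<sheetData></sheetData><end/>", "</sheetData>")
	fmt.Printf("%q %q %v\n", pre, suf, err)
}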
tealeg/xlsx | stream_file.go | Write | func (sf *StreamFile) Write(cells []string) error {
if sf.err != nil {
return sf.err
}
err := sf.write(cells)
if err != nil {
sf.err = err
return err
}
return sf.zipWriter.Flush()
} | go | func (sf *StreamFile) Write(cells []string) error {
if sf.err != nil {
return sf.err
}
err := sf.write(cells)
if err != nil {
sf.err = err
return err
}
return sf.zipWriter.Flush()
} | [
"func",
"(",
"sf",
"*",
"StreamFile",
")",
"Write",
"(",
"cells",
"[",
"]",
"string",
")",
"error",
"{",
"if",
"sf",
".",
"err",
"!=",
"nil",
"{",
"return",
"sf",
".",
"err",
"\n",
"}",
"\n",
"err",
":=",
"sf",
".",
"write",
"(",
"cells",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"sf",
".",
"zipWriter",
".",
"Flush",
"(",
")",
"\n",
"}"
] | // Write will write a row of cells to the current sheet. Every call to Write on the same sheet must contain the
// same number of cells as the header provided when the sheet was created or an error will be returned. This function
// will always trigger a flush on success. Currently the only supported data type is string data. | [
"Write",
"will",
"write",
"a",
"row",
"of",
"cells",
"to",
"the",
"current",
"sheet",
".",
"Every",
"call",
"to",
"Write",
"on",
"the",
"same",
"sheet",
"must",
"contain",
"the",
"same",
"number",
"of",
"cells",
"as",
"the",
"header",
"provided",
"when",
"the",
"sheet",
"was",
"created",
"or",
"an",
"error",
"will",
"be",
"returned",
".",
"This",
"function",
"will",
"always",
"trigger",
"a",
"flush",
"on",
"success",
".",
"Currently",
"the",
"only",
"supported",
"data",
"type",
"is",
"string",
"data",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file.go#L42-L52 | train |
tealeg/xlsx | stream_file.go | NextSheet | func (sf *StreamFile) NextSheet() error {
if sf.err != nil {
return sf.err
}
var sheetIndex int
if sf.currentSheet != nil {
if sf.currentSheet.index >= len(sf.xlsxFile.Sheets) {
sf.err = AlreadyOnLastSheetError
return AlreadyOnLastSheetError
}
if err := sf.writeSheetEnd(); err != nil {
sf.currentSheet = nil
sf.err = err
return err
}
sheetIndex = sf.currentSheet.index
}
sheetIndex++
sf.currentSheet = &streamSheet{
index: sheetIndex,
columnCount: len(sf.xlsxFile.Sheets[sheetIndex-1].Cols),
styleIds: sf.styleIds[sheetIndex-1],
rowCount: 1,
}
sheetPath := sheetFilePathPrefix + strconv.Itoa(sf.currentSheet.index) + sheetFilePathSuffix
fileWriter, err := sf.zipWriter.Create(sheetPath)
if err != nil {
sf.err = err
return err
}
sf.currentSheet.writer = fileWriter
if err := sf.writeSheetStart(); err != nil {
sf.err = err
return err
}
return nil
} | go | func (sf *StreamFile) NextSheet() error {
if sf.err != nil {
return sf.err
}
var sheetIndex int
if sf.currentSheet != nil {
if sf.currentSheet.index >= len(sf.xlsxFile.Sheets) {
sf.err = AlreadyOnLastSheetError
return AlreadyOnLastSheetError
}
if err := sf.writeSheetEnd(); err != nil {
sf.currentSheet = nil
sf.err = err
return err
}
sheetIndex = sf.currentSheet.index
}
sheetIndex++
sf.currentSheet = &streamSheet{
index: sheetIndex,
columnCount: len(sf.xlsxFile.Sheets[sheetIndex-1].Cols),
styleIds: sf.styleIds[sheetIndex-1],
rowCount: 1,
}
sheetPath := sheetFilePathPrefix + strconv.Itoa(sf.currentSheet.index) + sheetFilePathSuffix
fileWriter, err := sf.zipWriter.Create(sheetPath)
if err != nil {
sf.err = err
return err
}
sf.currentSheet.writer = fileWriter
if err := sf.writeSheetStart(); err != nil {
sf.err = err
return err
}
return nil
} | [
"func",
"(",
"sf",
"*",
"StreamFile",
")",
"NextSheet",
"(",
")",
"error",
"{",
"if",
"sf",
".",
"err",
"!=",
"nil",
"{",
"return",
"sf",
".",
"err",
"\n",
"}",
"\n",
"var",
"sheetIndex",
"int",
"\n",
"if",
"sf",
".",
"currentSheet",
"!=",
"nil",
"{",
"if",
"sf",
".",
"currentSheet",
".",
"index",
">=",
"len",
"(",
"sf",
".",
"xlsxFile",
".",
"Sheets",
")",
"{",
"sf",
".",
"err",
"=",
"AlreadyOnLastSheetError",
"\n",
"return",
"AlreadyOnLastSheetError",
"\n",
"}",
"\n",
"if",
"err",
":=",
"sf",
".",
"writeSheetEnd",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"sf",
".",
"currentSheet",
"=",
"nil",
"\n",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"sheetIndex",
"=",
"sf",
".",
"currentSheet",
".",
"index",
"\n",
"}",
"\n",
"sheetIndex",
"++",
"\n",
"sf",
".",
"currentSheet",
"=",
"&",
"streamSheet",
"{",
"index",
":",
"sheetIndex",
",",
"columnCount",
":",
"len",
"(",
"sf",
".",
"xlsxFile",
".",
"Sheets",
"[",
"sheetIndex",
"-",
"1",
"]",
".",
"Cols",
")",
",",
"styleIds",
":",
"sf",
".",
"styleIds",
"[",
"sheetIndex",
"-",
"1",
"]",
",",
"rowCount",
":",
"1",
",",
"}",
"\n",
"sheetPath",
":=",
"sheetFilePathPrefix",
"+",
"strconv",
".",
"Itoa",
"(",
"sf",
".",
"currentSheet",
".",
"index",
")",
"+",
"sheetFilePathSuffix",
"\n",
"fileWriter",
",",
"err",
":=",
"sf",
".",
"zipWriter",
".",
"Create",
"(",
"sheetPath",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"sf",
".",
"currentSheet",
".",
"writer",
"=",
"fileWriter",
"\n\n",
"if",
"err",
":=",
"sf",
".",
"writeSheetStart",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // NextSheet will switch to the next sheet. Sheets are selected in the same order they were added.
// Once you leave a sheet, you cannot return to it. | [
"NextSheet",
"will",
"switch",
"to",
"the",
"next",
"sheet",
".",
"Sheets",
"are",
"selected",
"in",
"the",
"same",
"order",
"they",
"were",
"added",
".",
"Once",
"you",
"leave",
"a",
"sheet",
"you",
"cannot",
"return",
"to",
"it",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file.go#L128-L165 | train |
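The sketch below, under the same assumption that sf was produced by the package's stream builder with at least two sheets registered, shows the strictly forward movement that NextSheet enforces.

package example

import "github.com/tealeg/xlsx"

// fillTwoSheets writes one row to the current sheet, advances, and writes a
// row to the following sheet. Sheets are visited in the order they were
// added; once NextSheet has moved on there is no way back, and calling it on
// the last sheet returns AlreadyOnLastSheetError.
func fillTwoSheets(sf *xlsx.StreamFile, first, second []string) error {
	if err := sf.Write(first); err != nil {
		return err
	}
	if err := sf.NextSheet(); err != nil {
		return err
	}
	return sf.Write(second)
}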
tealeg/xlsx | stream_file.go | Close | func (sf *StreamFile) Close() error {
if sf.err != nil {
return sf.err
}
// If there are sheets that have not been written yet, call NextSheet() which will add files to the zip for them.
// XLSX readers may error if the sheets registered in the metadata are not present in the file.
if sf.currentSheet != nil {
for sf.currentSheet.index < len(sf.xlsxFile.Sheets) {
if err := sf.NextSheet(); err != nil {
sf.err = err
return err
}
}
// Write the end of the last sheet.
if err := sf.writeSheetEnd(); err != nil {
sf.err = err
return err
}
}
err := sf.zipWriter.Close()
if err != nil {
sf.err = err
}
return err
} | go | func (sf *StreamFile) Close() error {
if sf.err != nil {
return sf.err
}
// If there are sheets that have not been written yet, call NextSheet() which will add files to the zip for them.
// XLSX readers may error if the sheets registered in the metadata are not present in the file.
if sf.currentSheet != nil {
for sf.currentSheet.index < len(sf.xlsxFile.Sheets) {
if err := sf.NextSheet(); err != nil {
sf.err = err
return err
}
}
// Write the end of the last sheet.
if err := sf.writeSheetEnd(); err != nil {
sf.err = err
return err
}
}
err := sf.zipWriter.Close()
if err != nil {
sf.err = err
}
return err
} | [
"func",
"(",
"sf",
"*",
"StreamFile",
")",
"Close",
"(",
")",
"error",
"{",
"if",
"sf",
".",
"err",
"!=",
"nil",
"{",
"return",
"sf",
".",
"err",
"\n",
"}",
"\n",
"// If there are sheets that have not been written yet, call NextSheet() which will add files to the zip for them.",
"// XLSX readers may error if the sheets registered in the metadata are not present in the file.",
"if",
"sf",
".",
"currentSheet",
"!=",
"nil",
"{",
"for",
"sf",
".",
"currentSheet",
".",
"index",
"<",
"len",
"(",
"sf",
".",
"xlsxFile",
".",
"Sheets",
")",
"{",
"if",
"err",
":=",
"sf",
".",
"NextSheet",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"// Write the end of the last sheet.",
"if",
"err",
":=",
"sf",
".",
"writeSheetEnd",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"err",
":=",
"sf",
".",
"zipWriter",
".",
"Close",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"sf",
".",
"err",
"=",
"err",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // Close closes the Stream File.
// Any sheets that have not yet been written to will have an empty sheet created for them. | [
"Close",
"closes",
"the",
"Stream",
"File",
".",
"Any",
"sheets",
"that",
"have",
"not",
"yet",
"been",
"written",
"to",
"will",
"have",
"an",
"empty",
"sheet",
"created",
"for",
"them",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file.go#L169-L193 | train |
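To close the loop on the streaming API, a minimal shutdown sketch, again assuming sf came from the package's stream builder:

package example

import "github.com/tealeg/xlsx"

// finish writes a final row and then finalises the workbook. Close writes
// empty sheets for any registered sheets that were never written to, so
// readers do not reject the file for missing parts.
func finish(sf *xlsx.StreamFile, lastRow []string) error {
	if err := sf.Write(lastRow); err != nil {
		return err
	}
	return sf.Close()
}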
tealeg/xlsx | stream_file.go | writeSheetStart | func (sf *StreamFile) writeSheetStart() error {
if sf.currentSheet == nil {
return NoCurrentSheetError
}
return sf.currentSheet.write(sf.sheetXmlPrefix[sf.currentSheet.index-1])
} | go | func (sf *StreamFile) writeSheetStart() error {
if sf.currentSheet == nil {
return NoCurrentSheetError
}
return sf.currentSheet.write(sf.sheetXmlPrefix[sf.currentSheet.index-1])
} | [
"func",
"(",
"sf",
"*",
"StreamFile",
")",
"writeSheetStart",
"(",
")",
"error",
"{",
"if",
"sf",
".",
"currentSheet",
"==",
"nil",
"{",
"return",
"NoCurrentSheetError",
"\n",
"}",
"\n",
"return",
"sf",
".",
"currentSheet",
".",
"write",
"(",
"sf",
".",
"sheetXmlPrefix",
"[",
"sf",
".",
"currentSheet",
".",
"index",
"-",
"1",
"]",
")",
"\n",
"}"
] | // writeSheetStart will write the start of the Sheet's XML | [
"writeSheetStart",
"will",
"write",
"the",
"start",
"of",
"the",
"Sheet",
"s",
"XML"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file.go#L196-L201 | train |
tealeg/xlsx | stream_file.go | writeSheetEnd | func (sf *StreamFile) writeSheetEnd() error {
if sf.currentSheet == nil {
return NoCurrentSheetError
}
if err := sf.currentSheet.write(endSheetDataTag); err != nil {
return err
}
return sf.currentSheet.write(sf.sheetXmlSuffix[sf.currentSheet.index-1])
} | go | func (sf *StreamFile) writeSheetEnd() error {
if sf.currentSheet == nil {
return NoCurrentSheetError
}
if err := sf.currentSheet.write(endSheetDataTag); err != nil {
return err
}
return sf.currentSheet.write(sf.sheetXmlSuffix[sf.currentSheet.index-1])
} | [
"func",
"(",
"sf",
"*",
"StreamFile",
")",
"writeSheetEnd",
"(",
")",
"error",
"{",
"if",
"sf",
".",
"currentSheet",
"==",
"nil",
"{",
"return",
"NoCurrentSheetError",
"\n",
"}",
"\n",
"if",
"err",
":=",
"sf",
".",
"currentSheet",
".",
"write",
"(",
"endSheetDataTag",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"sf",
".",
"currentSheet",
".",
"write",
"(",
"sf",
".",
"sheetXmlSuffix",
"[",
"sf",
".",
"currentSheet",
".",
"index",
"-",
"1",
"]",
")",
"\n",
"}"
] | // writeSheetEnd will write the end of the Sheet's XML | [
"writeSheetEnd",
"will",
"write",
"the",
"end",
"of",
"the",
"Sheet",
"s",
"XML"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/stream_file.go#L204-L212 | train |
tealeg/xlsx | format_code.go | compareFormatString | func compareFormatString(fmt1, fmt2 string) bool {
if fmt1 == fmt2 {
return true
}
if fmt1 == "" || strings.EqualFold(fmt1, "general") {
fmt1 = "general"
}
if fmt2 == "" || strings.EqualFold(fmt2, "general") {
fmt2 = "general"
}
return fmt1 == fmt2
} | go | func compareFormatString(fmt1, fmt2 string) bool {
if fmt1 == fmt2 {
return true
}
if fmt1 == "" || strings.EqualFold(fmt1, "general") {
fmt1 = "general"
}
if fmt2 == "" || strings.EqualFold(fmt2, "general") {
fmt2 = "general"
}
return fmt1 == fmt2
} | [
"func",
"compareFormatString",
"(",
"fmt1",
",",
"fmt2",
"string",
")",
"bool",
"{",
"if",
"fmt1",
"==",
"fmt2",
"{",
"return",
"true",
"\n",
"}",
"\n",
"if",
"fmt1",
"==",
"\"",
"\"",
"||",
"strings",
".",
"EqualFold",
"(",
"fmt1",
",",
"\"",
"\"",
")",
"{",
"fmt1",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"if",
"fmt2",
"==",
"\"",
"\"",
"||",
"strings",
".",
"EqualFold",
"(",
"fmt2",
",",
"\"",
"\"",
")",
"{",
"fmt2",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"return",
"fmt1",
"==",
"fmt2",
"\n",
"}"
] | // Format strings are a little strange to compare because empty string needs to be taken as general, and general needs
// to be compared case insensitively. | [
"Format",
"strings",
"are",
"a",
"little",
"strange",
"to",
"compare",
"because",
"empty",
"string",
"needs",
"to",
"be",
"taken",
"as",
"general",
"and",
"general",
"needs",
"to",
"be",
"compared",
"case",
"insensitively",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/format_code.go#L224-L235 | train |
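compareFormatString is unexported, so the standalone sketch below simply mirrors its normalisation rule to make the semantics visible: an empty format string and any casing of "general" are treated as the same format.

package main

import (
	"fmt"
	"strings"
)

// normalizeFormat mirrors the rule used by compareFormatString above: the
// empty string and any casing of "general" collapse to "general".
func normalizeFormat(f string) string {
	if f == "" || strings.EqualFold(f, "general") {
		return "general"
	}
	return f
}

func main() {
	fmt.Println(normalizeFormat("") == normalizeFormat("General"))     // true
	fmt.Println(normalizeFormat("0.00") == normalizeFormat("general")) // false
}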
tealeg/xlsx | format_code.go | splitFormatOnSemicolon | func splitFormatOnSemicolon(format string) ([]string, error) {
var formats []string
prevIndex := 0
for i := 0; i < len(format); i++ {
if format[i] == ';' {
formats = append(formats, format[prevIndex:i])
prevIndex = i + 1
} else if format[i] == '\\' {
i++
} else if format[i] == '"' {
endQuoteIndex := strings.Index(format[i+1:], "\"")
if endQuoteIndex == -1 {
// This is an invalid format string, fall back to general
return nil, errors.New("invalid format string, unmatched double quote")
}
i += endQuoteIndex + 1
}
}
return append(formats, format[prevIndex:]), nil
} | go | func splitFormatOnSemicolon(format string) ([]string, error) {
var formats []string
prevIndex := 0
for i := 0; i < len(format); i++ {
if format[i] == ';' {
formats = append(formats, format[prevIndex:i])
prevIndex = i + 1
} else if format[i] == '\\' {
i++
} else if format[i] == '"' {
endQuoteIndex := strings.Index(format[i+1:], "\"")
if endQuoteIndex == -1 {
// This is an invalid format string, fall back to general
return nil, errors.New("invalid format string, unmatched double quote")
}
i += endQuoteIndex + 1
}
}
return append(formats, format[prevIndex:]), nil
} | [
"func",
"splitFormatOnSemicolon",
"(",
"format",
"string",
")",
"(",
"[",
"]",
"string",
",",
"error",
")",
"{",
"var",
"formats",
"[",
"]",
"string",
"\n",
"prevIndex",
":=",
"0",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"format",
")",
";",
"i",
"++",
"{",
"if",
"format",
"[",
"i",
"]",
"==",
"';'",
"{",
"formats",
"=",
"append",
"(",
"formats",
",",
"format",
"[",
"prevIndex",
":",
"i",
"]",
")",
"\n",
"prevIndex",
"=",
"i",
"+",
"1",
"\n",
"}",
"else",
"if",
"format",
"[",
"i",
"]",
"==",
"'\\\\'",
"{",
"i",
"++",
"\n",
"}",
"else",
"if",
"format",
"[",
"i",
"]",
"==",
"'\"'",
"{",
"endQuoteIndex",
":=",
"strings",
".",
"Index",
"(",
"format",
"[",
"i",
"+",
"1",
":",
"]",
",",
"\"",
"\\\"",
"\"",
")",
"\n",
"if",
"endQuoteIndex",
"==",
"-",
"1",
"{",
"// This is an invalid format string, fall back to general",
"return",
"nil",
",",
"errors",
".",
"New",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"i",
"+=",
"endQuoteIndex",
"+",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"append",
"(",
"formats",
",",
"format",
"[",
"prevIndex",
":",
"]",
")",
",",
"nil",
"\n",
"}"
] | // splitFormatOnSemicolon will split the format string into the format sections
// This logic to split the different formats on semicolon is fully correct, and will skip all literal semicolons,
// and will catch all breaking semicolons. | [
"splitFormatOnSemicolon",
"will",
"split",
"the",
"format",
"string",
"into",
"the",
"format",
"sections",
"This",
"logic",
"to",
"split",
"the",
"different",
"formats",
"on",
"semicolon",
"is",
"fully",
"correct",
"and",
"will",
"skip",
"all",
"literal",
"semicolons",
"and",
"will",
"catch",
"all",
"breaking",
"semicolons",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/format_code.go#L315-L334 | train |
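Because splitFormatOnSemicolon is unexported, the demonstration below copies the function verbatim from this record into a standalone program, so the handling of escaped and quoted semicolons can be seen directly.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitFormatOnSemicolon is copied from the record above: semicolons that
// are escaped with a backslash or enclosed in double quotes do not split
// the format string.
func splitFormatOnSemicolon(format string) ([]string, error) {
	var formats []string
	prevIndex := 0
	for i := 0; i < len(format); i++ {
		if format[i] == ';' {
			formats = append(formats, format[prevIndex:i])
			prevIndex = i + 1
		} else if format[i] == '\\' {
			i++
		} else if format[i] == '"' {
			endQuoteIndex := strings.Index(format[i+1:], "\"")
			if endQuoteIndex == -1 {
				return nil, errors.New("invalid format string, unmatched double quote")
			}
			i += endQuoteIndex + 1
		}
	}
	return append(formats, format[prevIndex:]), nil
}

func main() {
	// The semicolon inside the quoted literal "a;b" does not split the format.
	parts, err := splitFormatOnSemicolon(`0.00;[Red]0.00;"a;b"`)
	fmt.Println(parts, err) // [0.00 [Red]0.00 "a;b"] <nil>
}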
tealeg/xlsx | data_validation.go | SetError | func (dd *xlsxCellDataValidation) SetError(style DataValidationErrorStyle, title, msg *string) {
dd.ShowErrorMessage = true
dd.Error = msg
dd.ErrorTitle = title
strStyle := styleStop
switch style {
case StyleStop:
strStyle = styleStop
case StyleWarning:
strStyle = styleWarning
case StyleInformation:
strStyle = styleInformation
}
dd.ErrorStyle = &strStyle
} | go | func (dd *xlsxCellDataValidation) SetError(style DataValidationErrorStyle, title, msg *string) {
dd.ShowErrorMessage = true
dd.Error = msg
dd.ErrorTitle = title
strStyle := styleStop
switch style {
case StyleStop:
strStyle = styleStop
case StyleWarning:
strStyle = styleWarning
case StyleInformation:
strStyle = styleInformation
}
dd.ErrorStyle = &strStyle
} | [
"func",
"(",
"dd",
"*",
"xlsxCellDataValidation",
")",
"SetError",
"(",
"style",
"DataValidationErrorStyle",
",",
"title",
",",
"msg",
"*",
"string",
")",
"{",
"dd",
".",
"ShowErrorMessage",
"=",
"true",
"\n",
"dd",
".",
"Error",
"=",
"msg",
"\n",
"dd",
".",
"ErrorTitle",
"=",
"title",
"\n",
"strStyle",
":=",
"styleStop",
"\n",
"switch",
"style",
"{",
"case",
"StyleStop",
":",
"strStyle",
"=",
"styleStop",
"\n",
"case",
"StyleWarning",
":",
"strStyle",
"=",
"styleWarning",
"\n",
"case",
"StyleInformation",
":",
"strStyle",
"=",
"styleInformation",
"\n\n",
"}",
"\n",
"dd",
".",
"ErrorStyle",
"=",
"&",
"strStyle",
"\n",
"}"
] | // SetError set error notice | [
"SetError",
"set",
"error",
"notice"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/data_validation.go#L72-L87 | train |
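Since xlsxCellDataValidation is unexported, the sketch below is written as if it lived inside the xlsx package itself; it relies only on the SetError method and the error-style constants that appear in this record.

package xlsx

// buildStopValidation attaches an error box to a data validation. StyleStop
// makes the spreadsheet application reject the value outright, while
// StyleWarning and StyleInformation only prompt the user.
func buildStopValidation() *xlsxCellDataValidation {
	dd := &xlsxCellDataValidation{}
	title := "Invalid value"
	msg := "Please enter a value that satisfies the validation rule."
	dd.SetError(StyleStop, &title, &msg)
	return dd
}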
tealeg/xlsx | data_validation.go | SetRange | func (dd *xlsxCellDataValidation) SetRange(f1, f2 int, t DataValidationType, o DataValidationOperator) error {
formula1 := fmt.Sprintf("%d", f1)
formula2 := fmt.Sprintf("%d", f2)
switch o {
case DataValidationOperatorBetween:
if f1 > f2 {
tmp := formula1
formula1 = formula2
formula2 = tmp
}
case DataValidationOperatorNotBetween:
if f1 > f2 {
tmp := formula1
formula1 = formula2
formula2 = tmp
}
}
dd.Formula1 = formula1
dd.Formula2 = formula2
dd.Type = convDataValidationType(t)
dd.Operator = convDataValidationOperatior(o)
return nil
} | go | func (dd *xlsxCellDataValidation) SetRange(f1, f2 int, t DataValidationType, o DataValidationOperator) error {
formula1 := fmt.Sprintf("%d", f1)
formula2 := fmt.Sprintf("%d", f2)
switch o {
case DataValidationOperatorBetween:
if f1 > f2 {
tmp := formula1
formula1 = formula2
formula2 = tmp
}
case DataValidationOperatorNotBetween:
if f1 > f2 {
tmp := formula1
formula1 = formula2
formula2 = tmp
}
}
dd.Formula1 = formula1
dd.Formula2 = formula2
dd.Type = convDataValidationType(t)
dd.Operator = convDataValidationOperatior(o)
return nil
} | [
"func",
"(",
"dd",
"*",
"xlsxCellDataValidation",
")",
"SetRange",
"(",
"f1",
",",
"f2",
"int",
",",
"t",
"DataValidationType",
",",
"o",
"DataValidationOperator",
")",
"error",
"{",
"formula1",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"f1",
")",
"\n",
"formula2",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"f2",
")",
"\n\n",
"switch",
"o",
"{",
"case",
"DataValidationOperatorBetween",
":",
"if",
"f1",
">",
"f2",
"{",
"tmp",
":=",
"formula1",
"\n",
"formula1",
"=",
"formula2",
"\n",
"formula2",
"=",
"tmp",
"\n",
"}",
"\n",
"case",
"DataValidationOperatorNotBetween",
":",
"if",
"f1",
">",
"f2",
"{",
"tmp",
":=",
"formula1",
"\n",
"formula1",
"=",
"formula2",
"\n",
"formula2",
"=",
"tmp",
"\n",
"}",
"\n",
"}",
"\n\n",
"dd",
".",
"Formula1",
"=",
"formula1",
"\n",
"dd",
".",
"Formula2",
"=",
"formula2",
"\n",
"dd",
".",
"Type",
"=",
"convDataValidationType",
"(",
"t",
")",
"\n",
"dd",
".",
"Operator",
"=",
"convDataValidationOperatior",
"(",
"o",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // SetDropList data validation range | [
"SetDropList",
"data",
"validation",
"range"
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/data_validation.go#L131-L155 | train |
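A companion sketch for SetRange, again written as if inside the xlsx package because the receiver type is unexported. DataValidationTypeWhole is an assumed constant name for whole-number validation; it does not appear in this record.

package xlsx

// buildRangeValidation restricts a cell to whole numbers between 1 and 10.
// SetRange swaps the bounds for the between operators when they arrive in
// the wrong order, so passing (10, 1) is equivalent to passing (1, 10).
// DataValidationTypeWhole is assumed, not taken from the record above.
func buildRangeValidation() (*xlsxCellDataValidation, error) {
	dd := &xlsxCellDataValidation{}
	if err := dd.SetRange(10, 1, DataValidationTypeWhole, DataValidationOperatorBetween); err != nil {
		return nil, err
	}
	return dd, nil
}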
tealeg/xlsx | style.go | NewStyle | func NewStyle() *Style {
return &Style{
Alignment: *DefaultAlignment(),
Border: *DefaultBorder(),
Fill: *DefaultFill(),
Font: *DefaultFont(),
}
} | go | func NewStyle() *Style {
return &Style{
Alignment: *DefaultAlignment(),
Border: *DefaultBorder(),
Fill: *DefaultFill(),
Font: *DefaultFont(),
}
} | [
"func",
"NewStyle",
"(",
")",
"*",
"Style",
"{",
"return",
"&",
"Style",
"{",
"Alignment",
":",
"*",
"DefaultAlignment",
"(",
")",
",",
"Border",
":",
"*",
"DefaultBorder",
"(",
")",
",",
"Fill",
":",
"*",
"DefaultFill",
"(",
")",
",",
"Font",
":",
"*",
"DefaultFont",
"(",
")",
",",
"}",
"\n",
"}"
] | // Return a new Style structure initialised with the default values. | [
"Return",
"a",
"new",
"Style",
"structure",
"initialised",
"with",
"the",
"default",
"values",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/style.go#L20-L27 | train |
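NewStyle is easiest to see in a complete program. The example below uses the package's in-memory API (NewFile, AddSheet, AddRow, AddCell, SetStyle, Save); the output file name is arbitrary.

package main

import (
	"log"

	"github.com/tealeg/xlsx"
)

func main() {
	file := xlsx.NewFile()
	sheet, err := file.AddSheet("Report")
	if err != nil {
		log.Fatal(err)
	}

	// NewStyle returns a Style pre-filled with the package defaults, so only
	// the properties being changed need to be set explicitly.
	bold := xlsx.NewStyle()
	bold.Font.Bold = true
	bold.ApplyFont = true

	cell := sheet.AddRow().AddCell()
	cell.Value = "Total"
	cell.SetStyle(bold)

	if err := file.Save("report.xlsx"); err != nil {
		log.Fatal(err)
	}
}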
tealeg/xlsx | style.go | makeXLSXStyleElements | func (style *Style) makeXLSXStyleElements() (xFont xlsxFont, xFill xlsxFill, xBorder xlsxBorder, xCellXf xlsxXf) {
xFont = xlsxFont{}
xFill = xlsxFill{}
xBorder = xlsxBorder{}
xCellXf = xlsxXf{}
xFont.Sz.Val = strconv.Itoa(style.Font.Size)
xFont.Name.Val = style.Font.Name
xFont.Family.Val = strconv.Itoa(style.Font.Family)
xFont.Charset.Val = strconv.Itoa(style.Font.Charset)
xFont.Color.RGB = style.Font.Color
if style.Font.Bold {
xFont.B = &xlsxVal{}
} else {
xFont.B = nil
}
if style.Font.Italic {
xFont.I = &xlsxVal{}
} else {
xFont.I = nil
}
if style.Font.Underline {
xFont.U = &xlsxVal{}
} else {
xFont.U = nil
}
xPatternFill := xlsxPatternFill{}
xPatternFill.PatternType = style.Fill.PatternType
xPatternFill.FgColor.RGB = style.Fill.FgColor
xPatternFill.BgColor.RGB = style.Fill.BgColor
xFill.PatternFill = xPatternFill
xBorder.Left = xlsxLine{
Style: style.Border.Left,
Color: xlsxColor{RGB: style.Border.LeftColor},
}
xBorder.Right = xlsxLine{
Style: style.Border.Right,
Color: xlsxColor{RGB: style.Border.RightColor},
}
xBorder.Top = xlsxLine{
Style: style.Border.Top,
Color: xlsxColor{RGB: style.Border.TopColor},
}
xBorder.Bottom = xlsxLine{
Style: style.Border.Bottom,
Color: xlsxColor{RGB: style.Border.BottomColor},
}
xCellXf = makeXLSXCellElement()
xCellXf.ApplyBorder = style.ApplyBorder
xCellXf.ApplyFill = style.ApplyFill
xCellXf.ApplyFont = style.ApplyFont
xCellXf.ApplyAlignment = style.ApplyAlignment
if style.NamedStyleIndex != nil {
xCellXf.XfId = style.NamedStyleIndex
}
return
} | go | func (style *Style) makeXLSXStyleElements() (xFont xlsxFont, xFill xlsxFill, xBorder xlsxBorder, xCellXf xlsxXf) {
xFont = xlsxFont{}
xFill = xlsxFill{}
xBorder = xlsxBorder{}
xCellXf = xlsxXf{}
xFont.Sz.Val = strconv.Itoa(style.Font.Size)
xFont.Name.Val = style.Font.Name
xFont.Family.Val = strconv.Itoa(style.Font.Family)
xFont.Charset.Val = strconv.Itoa(style.Font.Charset)
xFont.Color.RGB = style.Font.Color
if style.Font.Bold {
xFont.B = &xlsxVal{}
} else {
xFont.B = nil
}
if style.Font.Italic {
xFont.I = &xlsxVal{}
} else {
xFont.I = nil
}
if style.Font.Underline {
xFont.U = &xlsxVal{}
} else {
xFont.U = nil
}
xPatternFill := xlsxPatternFill{}
xPatternFill.PatternType = style.Fill.PatternType
xPatternFill.FgColor.RGB = style.Fill.FgColor
xPatternFill.BgColor.RGB = style.Fill.BgColor
xFill.PatternFill = xPatternFill
xBorder.Left = xlsxLine{
Style: style.Border.Left,
Color: xlsxColor{RGB: style.Border.LeftColor},
}
xBorder.Right = xlsxLine{
Style: style.Border.Right,
Color: xlsxColor{RGB: style.Border.RightColor},
}
xBorder.Top = xlsxLine{
Style: style.Border.Top,
Color: xlsxColor{RGB: style.Border.TopColor},
}
xBorder.Bottom = xlsxLine{
Style: style.Border.Bottom,
Color: xlsxColor{RGB: style.Border.BottomColor},
}
xCellXf = makeXLSXCellElement()
xCellXf.ApplyBorder = style.ApplyBorder
xCellXf.ApplyFill = style.ApplyFill
xCellXf.ApplyFont = style.ApplyFont
xCellXf.ApplyAlignment = style.ApplyAlignment
if style.NamedStyleIndex != nil {
xCellXf.XfId = style.NamedStyleIndex
}
return
} | [
"func",
"(",
"style",
"*",
"Style",
")",
"makeXLSXStyleElements",
"(",
")",
"(",
"xFont",
"xlsxFont",
",",
"xFill",
"xlsxFill",
",",
"xBorder",
"xlsxBorder",
",",
"xCellXf",
"xlsxXf",
")",
"{",
"xFont",
"=",
"xlsxFont",
"{",
"}",
"\n",
"xFill",
"=",
"xlsxFill",
"{",
"}",
"\n",
"xBorder",
"=",
"xlsxBorder",
"{",
"}",
"\n",
"xCellXf",
"=",
"xlsxXf",
"{",
"}",
"\n",
"xFont",
".",
"Sz",
".",
"Val",
"=",
"strconv",
".",
"Itoa",
"(",
"style",
".",
"Font",
".",
"Size",
")",
"\n",
"xFont",
".",
"Name",
".",
"Val",
"=",
"style",
".",
"Font",
".",
"Name",
"\n",
"xFont",
".",
"Family",
".",
"Val",
"=",
"strconv",
".",
"Itoa",
"(",
"style",
".",
"Font",
".",
"Family",
")",
"\n",
"xFont",
".",
"Charset",
".",
"Val",
"=",
"strconv",
".",
"Itoa",
"(",
"style",
".",
"Font",
".",
"Charset",
")",
"\n",
"xFont",
".",
"Color",
".",
"RGB",
"=",
"style",
".",
"Font",
".",
"Color",
"\n",
"if",
"style",
".",
"Font",
".",
"Bold",
"{",
"xFont",
".",
"B",
"=",
"&",
"xlsxVal",
"{",
"}",
"\n",
"}",
"else",
"{",
"xFont",
".",
"B",
"=",
"nil",
"\n",
"}",
"\n",
"if",
"style",
".",
"Font",
".",
"Italic",
"{",
"xFont",
".",
"I",
"=",
"&",
"xlsxVal",
"{",
"}",
"\n",
"}",
"else",
"{",
"xFont",
".",
"I",
"=",
"nil",
"\n",
"}",
"\n",
"if",
"style",
".",
"Font",
".",
"Underline",
"{",
"xFont",
".",
"U",
"=",
"&",
"xlsxVal",
"{",
"}",
"\n",
"}",
"else",
"{",
"xFont",
".",
"U",
"=",
"nil",
"\n",
"}",
"\n",
"xPatternFill",
":=",
"xlsxPatternFill",
"{",
"}",
"\n",
"xPatternFill",
".",
"PatternType",
"=",
"style",
".",
"Fill",
".",
"PatternType",
"\n",
"xPatternFill",
".",
"FgColor",
".",
"RGB",
"=",
"style",
".",
"Fill",
".",
"FgColor",
"\n",
"xPatternFill",
".",
"BgColor",
".",
"RGB",
"=",
"style",
".",
"Fill",
".",
"BgColor",
"\n",
"xFill",
".",
"PatternFill",
"=",
"xPatternFill",
"\n",
"xBorder",
".",
"Left",
"=",
"xlsxLine",
"{",
"Style",
":",
"style",
".",
"Border",
".",
"Left",
",",
"Color",
":",
"xlsxColor",
"{",
"RGB",
":",
"style",
".",
"Border",
".",
"LeftColor",
"}",
",",
"}",
"\n",
"xBorder",
".",
"Right",
"=",
"xlsxLine",
"{",
"Style",
":",
"style",
".",
"Border",
".",
"Right",
",",
"Color",
":",
"xlsxColor",
"{",
"RGB",
":",
"style",
".",
"Border",
".",
"RightColor",
"}",
",",
"}",
"\n",
"xBorder",
".",
"Top",
"=",
"xlsxLine",
"{",
"Style",
":",
"style",
".",
"Border",
".",
"Top",
",",
"Color",
":",
"xlsxColor",
"{",
"RGB",
":",
"style",
".",
"Border",
".",
"TopColor",
"}",
",",
"}",
"\n",
"xBorder",
".",
"Bottom",
"=",
"xlsxLine",
"{",
"Style",
":",
"style",
".",
"Border",
".",
"Bottom",
",",
"Color",
":",
"xlsxColor",
"{",
"RGB",
":",
"style",
".",
"Border",
".",
"BottomColor",
"}",
",",
"}",
"\n",
"xCellXf",
"=",
"makeXLSXCellElement",
"(",
")",
"\n",
"xCellXf",
".",
"ApplyBorder",
"=",
"style",
".",
"ApplyBorder",
"\n",
"xCellXf",
".",
"ApplyFill",
"=",
"style",
".",
"ApplyFill",
"\n",
"xCellXf",
".",
"ApplyFont",
"=",
"style",
".",
"ApplyFont",
"\n",
"xCellXf",
".",
"ApplyAlignment",
"=",
"style",
".",
"ApplyAlignment",
"\n",
"if",
"style",
".",
"NamedStyleIndex",
"!=",
"nil",
"{",
"xCellXf",
".",
"XfId",
"=",
"style",
".",
"NamedStyleIndex",
"\n",
"}",
"\n",
"return",
"\n",
"}"
] | // Generate the underlying XLSX style elements that correspond to the Style. | [
"Generate",
"the",
"underlying",
"XLSX",
"style",
"elements",
"that",
"correspond",
"to",
"the",
"Style",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/style.go#L30-L85 | train |
tealeg/xlsx | lib.go | ColIndexToLetters | func ColIndexToLetters(colRef int) string {
parts := intToBase26(colRef)
return formatColumnName(smooshBase26Slice(parts))
} | go | func ColIndexToLetters(colRef int) string {
parts := intToBase26(colRef)
return formatColumnName(smooshBase26Slice(parts))
} | [
"func",
"ColIndexToLetters",
"(",
"colRef",
"int",
")",
"string",
"{",
"parts",
":=",
"intToBase26",
"(",
"colRef",
")",
"\n",
"return",
"formatColumnName",
"(",
"smooshBase26Slice",
"(",
"parts",
")",
")",
"\n",
"}"
] | // ColIndexToLetters is used to convert a zero based, numeric column
// indentifier into a character code. | [
"ColIndexToLetters",
"is",
"used",
"to",
"convert",
"a",
"zero",
"based",
"numeric",
"column",
"indentifier",
"into",
"a",
"character",
"code",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/lib.go#L147-L150 | train |
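A quick demonstration of the zero-based mapping performed by ColIndexToLetters:

package main

import (
	"fmt"

	"github.com/tealeg/xlsx"
)

func main() {
	// Zero-based column indices map to spreadsheet column letters.
	for _, i := range []int{0, 25, 26, 701} {
		fmt.Println(i, "->", xlsx.ColIndexToLetters(i)) // A, Z, AA, ZZ
	}
}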
tealeg/xlsx | lib.go | GetCellIDStringFromCoords | func GetCellIDStringFromCoords(x, y int) string {
return GetCellIDStringFromCoordsWithFixed(x, y, false, false)
} | go | func GetCellIDStringFromCoords(x, y int) string {
return GetCellIDStringFromCoordsWithFixed(x, y, false, false)
} | [
"func",
"GetCellIDStringFromCoords",
"(",
"x",
",",
"y",
"int",
")",
"string",
"{",
"return",
"GetCellIDStringFromCoordsWithFixed",
"(",
"x",
",",
"y",
",",
"false",
",",
"false",
")",
"\n",
"}"
] | // GetCellIDStringFromCoords returns the Excel format cell name that
// represents a pair of zero based cartesian coordinates. | [
"GetCellIDStringFromCoords",
"returns",
"the",
"Excel",
"format",
"cell",
"name",
"that",
"represents",
"a",
"pair",
"of",
"zero",
"based",
"cartesian",
"coordinates",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/lib.go#L195-L197 | train |
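GetCellIDStringFromCoords takes zero-based cartesian coordinates, so column 0, row 0 is cell A1:

package main

import (
	"fmt"

	"github.com/tealeg/xlsx"
)

func main() {
	fmt.Println(xlsx.GetCellIDStringFromCoords(0, 0)) // A1
	fmt.Println(xlsx.GetCellIDStringFromCoords(3, 9)) // D10
}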
tealeg/xlsx | lib.go | GetCellIDStringFromCoordsWithFixed | func GetCellIDStringFromCoordsWithFixed(x, y int, xFixed, yFixed bool) string {
xStr := ColIndexToLetters(x)
if xFixed {
xStr = fixedCellRefChar + xStr
}
yStr := RowIndexToString(y)
if yFixed {
yStr = fixedCellRefChar + yStr
}
return xStr + yStr
} | go | func GetCellIDStringFromCoordsWithFixed(x, y int, xFixed, yFixed bool) string {
xStr := ColIndexToLetters(x)
if xFixed {
xStr = fixedCellRefChar + xStr
}
yStr := RowIndexToString(y)
if yFixed {
yStr = fixedCellRefChar + yStr
}
return xStr + yStr
} | [
"func",
"GetCellIDStringFromCoordsWithFixed",
"(",
"x",
",",
"y",
"int",
",",
"xFixed",
",",
"yFixed",
"bool",
")",
"string",
"{",
"xStr",
":=",
"ColIndexToLetters",
"(",
"x",
")",
"\n",
"if",
"xFixed",
"{",
"xStr",
"=",
"fixedCellRefChar",
"+",
"xStr",
"\n",
"}",
"\n",
"yStr",
":=",
"RowIndexToString",
"(",
"y",
")",
"\n",
"if",
"yFixed",
"{",
"yStr",
"=",
"fixedCellRefChar",
"+",
"yStr",
"\n",
"}",
"\n",
"return",
"xStr",
"+",
"yStr",
"\n",
"}"
] | // GetCellIDStringFromCoordsWithFixed returns the Excel format cell name that
// represents a pair of zero based cartesian coordinates.
// It can specify either value as fixed. | [
"GetCellIDStringFromCoordsWithFixed",
"returns",
"the",
"Excel",
"format",
"cell",
"name",
"that",
"represents",
"a",
"pair",
"of",
"zero",
"based",
"cartesian",
"coordinates",
".",
"It",
"can",
"specify",
"either",
"value",
"as",
"fixed",
"."
] | b7005b5d48cbd240baa323f68fb644fe072ef088 | https://github.com/tealeg/xlsx/blob/b7005b5d48cbd240baa323f68fb644fe072ef088/lib.go#L202-L212 | train |
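The fixed variant prepends "$" to whichever coordinate is marked fixed, which is how absolute references for formulas are produced:

package main

import (
	"fmt"

	"github.com/tealeg/xlsx"
)

func main() {
	fmt.Println(xlsx.GetCellIDStringFromCoordsWithFixed(1, 1, true, true))  // $B$2
	fmt.Println(xlsx.GetCellIDStringFromCoordsWithFixed(1, 1, false, true)) // B$2
}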